2024-11-19 08:44:36,245 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 08:44:36,263 main DEBUG Took 0.014828 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-19 08:44:36,263 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-19 08:44:36,264 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-19 08:44:36,265 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-19 08:44:36,266 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,275 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-19 08:44:36,295 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,296 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,297 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,298 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,299 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,301 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,302 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,304 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,304 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,310 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,311 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-19 08:44:36,313 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,314 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,315 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,317 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,317 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,318 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,319 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 08:44:36,320 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,321 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-19 08:44:36,323 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 08:44:36,325 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-19 08:44:36,328 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-19 08:44:36,329 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-19 08:44:36,331 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-19 08:44:36,331 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-19 08:44:36,342 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-19 08:44:36,345 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-19 08:44:36,347 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-19 08:44:36,348 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-19 08:44:36,348 main DEBUG createAppenders(={Console}) 2024-11-19 08:44:36,349 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-19 08:44:36,350 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 08:44:36,350 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-19 08:44:36,351 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-19 08:44:36,351 main DEBUG OutputStream closed 2024-11-19 08:44:36,352 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-19 08:44:36,352 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-19 08:44:36,352 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-19 08:44:36,457 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-19 08:44:36,460 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-19 08:44:36,462 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-19 08:44:36,464 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-19 08:44:36,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-19 08:44:36,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-19 08:44:36,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-19 08:44:36,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-19 08:44:36,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-19 08:44:36,467 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-19 08:44:36,467 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-19 08:44:36,468 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-19 08:44:36,468 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-19 08:44:36,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-19 08:44:36,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-19 08:44:36,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-19 08:44:36,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-19 08:44:36,471 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-19 08:44:36,474 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-19 08:44:36,474 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-19 08:44:36,475 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-19 08:44:36,476 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-19T08:44:36,813 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6 2024-11-19 08:44:36,817 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-19 08:44:36,817 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-19T08:44:36,830 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-19T08:44:36,889 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=266, ProcessCount=11, AvailableMemoryMB=7144 2024-11-19T08:44:36,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T08:44:36,916 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af, deleteOnExit=true 2024-11-19T08:44:36,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T08:44:36,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/test.cache.data in system properties and HBase conf 2024-11-19T08:44:36,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T08:44:36,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir in system properties and HBase conf 2024-11-19T08:44:36,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T08:44:36,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T08:44:36,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T08:44:37,042 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-19T08:44:37,156 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T08:44:37,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:44:37,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:44:37,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T08:44:37,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:44:37,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T08:44:37,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T08:44:37,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:44:37,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:44:37,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T08:44:37,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/nfs.dump.dir in system properties and HBase conf 2024-11-19T08:44:37,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/java.io.tmpdir in system properties and HBase conf 2024-11-19T08:44:37,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:44:37,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T08:44:37,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T08:44:37,727 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:44:38,334 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-19T08:44:38,417 INFO [Time-limited test {}] log.Log(170): Logging initialized @2943ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-19T08:44:38,503 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:44:38,582 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:44:38,617 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:44:38,617 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:44:38,619 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:44:38,636 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:44:38,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:44:38,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:44:38,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/java.io.tmpdir/jetty-localhost-40845-hadoop-hdfs-3_4_1-tests_jar-_-any-14608996568917360486/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:44:38,909 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:40845} 2024-11-19T08:44:38,909 INFO [Time-limited test {}] server.Server(415): Started @3437ms 2024-11-19T08:44:38,948 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:44:39,506 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:44:39,519 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:44:39,523 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:44:39,523 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:44:39,523 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:44:39,524 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:44:39,525 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:44:39,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/java.io.tmpdir/jetty-localhost-40497-hadoop-hdfs-3_4_1-tests_jar-_-any-18241048951785105995/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:44:39,642 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:40497} 2024-11-19T08:44:39,642 INFO [Time-limited test {}] server.Server(415): Started @4170ms 2024-11-19T08:44:39,705 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:44:39,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:44:39,862 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:44:39,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:44:39,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:44:39,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:44:39,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:44:39,867 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:44:39,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/java.io.tmpdir/jetty-localhost-34709-hadoop-hdfs-3_4_1-tests_jar-_-any-2527401007095202835/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:44:39,991 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:34709} 2024-11-19T08:44:39,991 INFO [Time-limited test {}] server.Server(415): Started @4519ms 2024-11-19T08:44:39,995 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T08:44:40,882 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data1/current/BP-1381190410-172.17.0.2-1732005877848/current, will proceed with Du for space computation calculation, 2024-11-19T08:44:40,882 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data2/current/BP-1381190410-172.17.0.2-1732005877848/current, will proceed with Du for space computation calculation, 2024-11-19T08:44:40,882 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data4/current/BP-1381190410-172.17.0.2-1732005877848/current, will proceed with Du for space computation calculation, 2024-11-19T08:44:40,882 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data3/current/BP-1381190410-172.17.0.2-1732005877848/current, will proceed with Du for space computation calculation, 2024-11-19T08:44:40,922 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:44:40,923 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:44:40,981 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6 2024-11-19T08:44:40,991 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf523d7b156b743da with lease ID 0x2508feead16acee8: Processing first storage report for DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a from datanode DatanodeRegistration(127.0.0.1:43377, datanodeUuid=767fce67-0218-4400-aa28-1d407d9526c8, infoPort=40295, infoSecurePort=0, ipcPort=39649, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848) 2024-11-19T08:44:40,993 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf523d7b156b743da with lease ID 0x2508feead16acee8: from storage DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a node DatanodeRegistration(127.0.0.1:43377, datanodeUuid=767fce67-0218-4400-aa28-1d407d9526c8, infoPort=40295, infoSecurePort=0, ipcPort=39649, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-19T08:44:40,994 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25102c37a6c26b88 with lease ID 0x2508feead16acee7: Processing first storage report for DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210 from datanode DatanodeRegistration(127.0.0.1:44347, datanodeUuid=07ab3d2f-431a-46c2-8ad3-da62006b1003, infoPort=44247, infoSecurePort=0, ipcPort=39619, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848) 2024-11-19T08:44:40,994 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25102c37a6c26b88 with lease ID 0x2508feead16acee7: from storage DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210 node DatanodeRegistration(127.0.0.1:44347, datanodeUuid=07ab3d2f-431a-46c2-8ad3-da62006b1003, infoPort=44247, infoSecurePort=0, ipcPort=39619, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:44:40,995 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf523d7b156b743da with lease ID 0x2508feead16acee8: Processing first storage report for DS-489f98e1-6601-410f-8b32-863e5a04b18a from datanode DatanodeRegistration(127.0.0.1:43377, datanodeUuid=767fce67-0218-4400-aa28-1d407d9526c8, infoPort=40295, infoSecurePort=0, ipcPort=39649, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848) 2024-11-19T08:44:40,995 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf523d7b156b743da with lease ID 0x2508feead16acee8: from storage DS-489f98e1-6601-410f-8b32-863e5a04b18a node DatanodeRegistration(127.0.0.1:43377, datanodeUuid=767fce67-0218-4400-aa28-1d407d9526c8, infoPort=40295, infoSecurePort=0, ipcPort=39649, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:44:40,996 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25102c37a6c26b88 with lease ID 0x2508feead16acee7: Processing first storage report for DS-53055c9a-baa8-48e2-aa2b-4570822a1cb1 from datanode DatanodeRegistration(127.0.0.1:44347, datanodeUuid=07ab3d2f-431a-46c2-8ad3-da62006b1003, 
infoPort=44247, infoSecurePort=0, ipcPort=39619, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848) 2024-11-19T08:44:40,996 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25102c37a6c26b88 with lease ID 0x2508feead16acee7: from storage DS-53055c9a-baa8-48e2-aa2b-4570822a1cb1 node DatanodeRegistration(127.0.0.1:44347, datanodeUuid=07ab3d2f-431a-46c2-8ad3-da62006b1003, infoPort=44247, infoSecurePort=0, ipcPort=39619, storageInfo=lv=-57;cid=testClusterID;nsid=1960331729;c=1732005877848), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:44:41,098 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/zookeeper_0, clientPort=58282, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T08:44:41,116 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58282 2024-11-19T08:44:41,132 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:41,136 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:41,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:44:41,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:44:41,834 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad with version=8 2024-11-19T08:44:41,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase-staging 2024-11-19T08:44:41,924 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-19T08:44:42,229 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:44:42,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:44:42,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:44:42,246 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:44:42,246 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:44:42,247 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:44:42,431 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T08:44:42,497 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-19T08:44:42,509 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-19T08:44:42,514 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:44:42,548 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 14844 (auto-detected) 2024-11-19T08:44:42,549 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-19T08:44:42,576 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34827 2024-11-19T08:44:42,609 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34827 connecting to ZooKeeper ensemble=127.0.0.1:58282 2024-11-19T08:44:42,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348270x0, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:44:42,740 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34827-0x101538df7ab0000 connected 2024-11-19T08:44:42,857 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:42,861 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:42,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:44:42,881 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad, hbase.cluster.distributed=false 2024-11-19T08:44:42,914 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:44:42,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34827 
2024-11-19T08:44:42,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34827 2024-11-19T08:44:42,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34827 2024-11-19T08:44:42,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34827 2024-11-19T08:44:42,928 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34827 2024-11-19T08:44:43,066 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:44:43,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:44:43,069 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:44:43,069 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:44:43,069 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:44:43,069 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:44:43,073 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:44:43,076 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:44:43,077 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40001 2024-11-19T08:44:43,079 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40001 connecting to ZooKeeper ensemble=127.0.0.1:58282 2024-11-19T08:44:43,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:43,086 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:43,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400010x0, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:44:43,118 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:400010x0, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:44:43,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:40001-0x101538df7ab0001 connected 2024-11-19T08:44:43,123 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:44:43,137 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:44:43,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T08:44:43,146 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:44:43,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40001 2024-11-19T08:44:43,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40001 2024-11-19T08:44:43,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40001 2024-11-19T08:44:43,167 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40001 2024-11-19T08:44:43,167 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40001 2024-11-19T08:44:43,186 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3ab37fa97a98:34827 2024-11-19T08:44:43,190 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:43,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:44:43,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:44:43,221 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:43,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T08:44:43,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:43,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:43,263 DEBUG 
[master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:44:43,265 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3ab37fa97a98,34827,1732005882034 from backup master directory 2024-11-19T08:44:43,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:43,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:44:43,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:44:43,277 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T08:44:43,278 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:43,280 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-19T08:44:43,283 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-19T08:44:43,354 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase.id] with ID: 5b3012d5-f475-4e74-95b5-5192ae6e3658 2024-11-19T08:44:43,354 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/.tmp/hbase.id 2024-11-19T08:44:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:44:43,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:44:43,384 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/.tmp/hbase.id]:[hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase.id] 2024-11-19T08:44:43,443 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:43,449 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-19T08:44:43,469 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-19T08:44:43,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:43,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:43,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:44:43,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:44:43,537 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:44:43,539 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T08:44:43,545 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:44:43,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:44:43,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:44:43,614 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store 2024-11-19T08:44:43,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:44:43,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:44:43,660 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-19T08:44:43,664 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:44:43,666 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:44:43,666 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:44:43,666 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:44:43,668 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:44:43,668 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:44:43,669 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T08:44:43,670 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732005883666Disabling compacts and flushes for region at 1732005883666Disabling writes for close at 1732005883668 (+2 ms)Writing region close event to WAL at 1732005883669 (+1 ms)Closed at 1732005883669 2024-11-19T08:44:43,673 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/.initializing 2024-11-19T08:44:43,673 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/WALs/3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:43,710 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C34827%2C1732005882034, suffix=, logDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/WALs/3ab37fa97a98,34827,1732005882034, archiveDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/oldWALs, maxLogs=10 2024-11-19T08:44:43,724 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C34827%2C1732005882034.1732005883718 2024-11-19T08:44:43,761 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/WALs/3ab37fa97a98,34827,1732005882034/3ab37fa97a98%2C34827%2C1732005882034.1732005883718 2024-11-19T08:44:43,776 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40295:40295),(127.0.0.1/127.0.0.1:44247:44247)] 2024-11-19T08:44:43,791 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:44:43,792 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:44:43,796 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,798 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T08:44:43,870 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:43,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:43,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T08:44:43,876 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:43,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:44:43,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,881 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T08:44:43,881 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:43,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:44:43,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T08:44:43,887 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:43,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:44:43,889 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,893 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,894 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,899 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,900 DEBUG 
[master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,905 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T08:44:43,910 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:44:43,914 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:44:43,916 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841306, jitterRate=0.06977701187133789}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T08:44:43,925 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732005883814Initializing all the Stores at 1732005883816 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005883817 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005883818 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005883818Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005883819 (+1 ms)Cleaning up temporary data from old regions at 1732005883900 (+81 ms)Region opened successfully at 1732005883925 (+25 ms) 2024-11-19T08:44:43,930 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T08:44:43,964 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66560354, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:44:43,999 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T08:44:44,011 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T08:44:44,011 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T08:44:44,014 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T08:44:44,016 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-19T08:44:44,020 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-19T08:44:44,020 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T08:44:44,048 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T08:44:44,058 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T08:44:44,117 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T08:44:44,120 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T08:44:44,122 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T08:44:44,133 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T08:44:44,137 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T08:44:44,141 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T08:44:44,150 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T08:44:44,152 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist 
(not necessarily an error) 2024-11-19T08:44:44,163 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T08:44:44,182 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T08:44:44,192 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T08:44:44,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:44:44,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:44:44,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:44,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:44,207 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3ab37fa97a98,34827,1732005882034, sessionid=0x101538df7ab0000, setting cluster-up flag (Was=false) 2024-11-19T08:44:44,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:44,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:44,270 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T08:44:44,272 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:44,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:44,328 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T08:44:44,330 DEBUG 
[master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:44,335 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T08:44:44,372 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(746): ClusterId : 5b3012d5-f475-4e74-95b5-5192ae6e3658 2024-11-19T08:44:44,376 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:44:44,415 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T08:44:44,426 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T08:44:44,435 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T08:44:44,439 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:44:44,439 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:44:44,443 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3ab37fa97a98,34827,1732005882034 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T08:44:44,451 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:44:44,452 DEBUG [RS:0;3ab37fa97a98:40001 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5470d508, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:44:44,453 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:44:44,453 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:44:44,453 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:44:44,454 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:44:44,454 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3ab37fa97a98:0, corePoolSize=10, maxPoolSize=10 2024-11-19T08:44:44,454 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,454 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:44:44,454 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,466 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732005914466 2024-11-19T08:44:44,467 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:44:44,468 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T08:44:44,468 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T08:44:44,470 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T08:44:44,471 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3ab37fa97a98:40001 2024-11-19T08:44:44,474 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:44,474 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T08:44:44,474 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T08:44:44,474 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T08:44:44,475 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:44:44,475 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T08:44:44,475 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:44:44,475 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T08:44:44,475 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T08:44:44,476 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-19T08:44:44,479 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,34827,1732005882034 with port=40001, startcode=1732005883022 2024-11-19T08:44:44,487 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T08:44:44,488 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T08:44:44,489 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T08:44:44,497 DEBUG [RS:0;3ab37fa97a98:40001 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:44:44,499 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T08:44:44,499 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T08:44:44,506 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005884501,5,FailOnTimeoutGroup] 2024-11-19T08:44:44,510 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005884507,5,FailOnTimeoutGroup] 2024-11-19T08:44:44,511 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,511 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T08:44:44,512 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,512 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
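Two of the entries above are tuning hints rather than errors: StoreHotnessProtector stays disabled unless hbase.region.store.parallel.put.limit is set above 0, and HMaster skips reopening regions with a very high storeFileRefCount unless hbase.regions.recovery.store.file.ref.count is above 0. A minimal sketch of switching both on through the configuration follows; the threshold values are arbitrary illustrative choices, not recommendations from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TuningHintsSketch {
  public static Configuration withHintsEnabled() {
    Configuration conf = HBaseConfiguration.create();
    // Enables StoreHotnessProtector, reported as disabled in the log above (value is illustrative).
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    // Enables reopening regions with a very high store file reference count (value is illustrative).
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    return conf;
  }
}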
2024-11-19T08:44:44,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:44:44,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:44:44,526 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T08:44:44,527 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad 2024-11-19T08:44:44,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:44:44,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:44:44,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:44:44,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:44:44,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:44:44,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:44,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:44,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:44:44,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:44:44,565 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:44,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:44,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:44:44,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:44:44,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:44,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:44,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:44:44,573 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55577, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:44:44,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:44:44,574 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:44,576 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:44,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:44:44,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740 2024-11-19T08:44:44,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740 2024-11-19T08:44:44,580 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:44,582 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34827 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:44,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:44:44,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 
1588230740 2024-11-19T08:44:44,584 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:44:44,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:44:44,596 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:44:44,597 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857691, jitterRate=0.09061144292354584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:44:44,599 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad 2024-11-19T08:44:44,600 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34933 2024-11-19T08:44:44,600 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:44:44,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732005884549Initializing all the Stores at 1732005884551 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005884551Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005884552 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005884552Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005884552Cleaning up temporary data from old regions at 1732005884583 (+31 ms)Region opened successfully at 1732005884602 (+19 ms) 2024-11-19T08:44:44,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:44:44,603 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 
2024-11-19T08:44:44,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:44:44,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:44:44,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:44:44,605 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:44:44,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732005884603Disabling compacts and flushes for region at 1732005884603Disabling writes for close at 1732005884603Writing region close event to WAL at 1732005884605 (+2 ms)Closed at 1732005884605 2024-11-19T08:44:44,610 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:44:44,610 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T08:44:44,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T08:44:44,631 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:44:44,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:44:44,634 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T08:44:44,635 DEBUG [RS:0;3ab37fa97a98:40001 {}] zookeeper.ZKUtil(111): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:44,636 WARN [RS:0;3ab37fa97a98:40001 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T08:44:44,636 INFO [RS:0;3ab37fa97a98:40001 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:44:44,636 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:44,637 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,40001,1732005883022] 2024-11-19T08:44:44,664 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:44:44,683 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:44:44,687 INFO [RS:0;3ab37fa97a98:40001 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:44:44,687 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,689 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:44:44,694 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:44:44,695 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,696 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,696 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,696 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,696 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,696 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,696 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:44:44,697 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,697 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,697 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T08:44:44,697 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,697 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,697 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:44:44,697 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:44:44,698 DEBUG [RS:0;3ab37fa97a98:40001 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:44:44,699 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,699 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,699 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,699 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,699 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,699 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,40001,1732005883022-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:44:44,718 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:44:44,720 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,40001,1732005883022-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,720 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:44,720 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.Replication(171): 3ab37fa97a98,40001,1732005883022 started 2024-11-19T08:44:44,742 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:44:44,742 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,40001,1732005883022, RpcServer on 3ab37fa97a98/172.17.0.2:40001, sessionid=0x101538df7ab0001 2024-11-19T08:44:44,743 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:44:44,743 DEBUG [RS:0;3ab37fa97a98:40001 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:44,743 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,40001,1732005883022' 2024-11-19T08:44:44,743 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:44:44,744 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:44:44,745 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:44:44,745 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:44:44,745 DEBUG [RS:0;3ab37fa97a98:40001 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:44,745 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,40001,1732005883022' 2024-11-19T08:44:44,745 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:44:44,746 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:44:44,746 DEBUG [RS:0;3ab37fa97a98:40001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:44:44,746 INFO [RS:0;3ab37fa97a98:40001 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:44:44,747 INFO [RS:0;3ab37fa97a98:40001 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T08:44:44,786 WARN [3ab37fa97a98:34827 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-19T08:44:44,854 INFO [RS:0;3ab37fa97a98:40001 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C40001%2C1732005883022, suffix=, logDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022, archiveDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs, maxLogs=32 2024-11-19T08:44:44,856 INFO [RS:0;3ab37fa97a98:40001 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005884856 2024-11-19T08:44:44,866 INFO [RS:0;3ab37fa97a98:40001 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005884856 2024-11-19T08:44:44,867 DEBUG [RS:0;3ab37fa97a98:40001 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40295:40295),(127.0.0.1/127.0.0.1:44247:44247)] 2024-11-19T08:44:45,038 DEBUG [3ab37fa97a98:34827 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T08:44:45,051 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:45,057 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,40001,1732005883022, state=OPENING 2024-11-19T08:44:45,125 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T08:44:45,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:45,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:44:45,138 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:44:45,138 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:44:45,139 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:44:45,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,40001,1732005883022}] 2024-11-19T08:44:45,318 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T08:44:45,321 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37473, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T08:44:45,336 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T08:44:45,337 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:44:45,341 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C40001%2C1732005883022.meta, suffix=.meta, logDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022, archiveDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs, maxLogs=32 2024-11-19T08:44:45,344 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.meta.1732005885343.meta 2024-11-19T08:44:45,355 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.meta.1732005885343.meta 2024-11-19T08:44:45,360 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40295:40295),(127.0.0.1/127.0.0.1:44247:44247)] 2024-11-19T08:44:45,361 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:44:45,364 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T08:44:45,368 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T08:44:45,373 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
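
The two "WAL configuration" entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, provider FSHLogProvider) are derived from configuration. To the best of my knowledge the keys below are the ones behind those numbers (rollsize being blocksize times the roll multiplier); worth verifying against the HBase reference guide for your version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                      // FSHLogProvider, as logged above
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);      // 256 MB * 0.5 = 128 MB rollsize
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
      }
    }
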
2024-11-19T08:44:45,378 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T08:44:45,379 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:44:45,379 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T08:44:45,379 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T08:44:45,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:44:45,385 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:44:45,385 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:45,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:45,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:44:45,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:44:45,390 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:45,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:45,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:44:45,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:44:45,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:45,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:44:45,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:44:45,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:44:45,397 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:45,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
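
Each store opened above logs the same CompactionConfiguration: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, and a weekly major-compaction period with 0.5 jitter. As a sketch of where those values usually come from, the keys below are the ones I believe feed that configuration (defaults shown; verify against your HBase version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period: 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println("min files to compact: " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }
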
2024-11-19T08:44:45,400 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:44:45,401 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740 2024-11-19T08:44:45,404 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740 2024-11-19T08:44:45,407 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:44:45,408 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:44:45,409 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:44:45,414 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:44:45,416 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884439, jitterRate=0.12462317943572998}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:44:45,417 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T08:44:45,419 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732005885380Writing region info on filesystem at 1732005885380Initializing all the Stores at 1732005885382 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005885382Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005885382Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005885382Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005885383 (+1 ms)Cleaning up temporary data from old regions at 1732005885408 (+25 ms)Running coprocessor post-open hooks at 1732005885417 (+9 ms)Region opened successfully at 1732005885419 (+2 ms) 2024-11-19T08:44:45,427 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732005885309 2024-11-19T08:44:45,440 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T08:44:45,441 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T08:44:45,443 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:45,446 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,40001,1732005883022, state=OPEN 2024-11-19T08:44:45,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:44:45,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:44:45,543 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:44:45,543 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:44:45,543 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:45,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T08:44:45,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,40001,1732005883022 in 403 msec 2024-11-19T08:44:45,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T08:44:45,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 931 msec 2024-11-19T08:44:45,560 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:44:45,560 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T08:44:45,586 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:44:45,588 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,40001,1732005883022, seqNum=-1] 2024-11-19T08:44:45,611 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:44:45,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57839, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:44:45,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2680 sec 2024-11-19T08:44:45,639 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732005885639, completionTime=-1 2024-11-19T08:44:45,643 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T08:44:45,643 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T08:44:45,676 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T08:44:45,676 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732005945676 2024-11-19T08:44:45,676 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732006005676 2024-11-19T08:44:45,676 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 32 msec 2024-11-19T08:44:45,680 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,34827,1732005882034-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:45,680 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,34827,1732005882034-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:45,680 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,34827,1732005882034-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:45,682 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3ab37fa97a98:34827, period=300000, unit=MILLISECONDS is enabled. 
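
At this point InitMetaProcedure has created the built-in 'default' and 'hbase' namespaces and one region server has reported in. A quick client-side check of that state, assuming the standard HBase 2+ Admin API and a reachable cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterCheckExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Expect the two namespaces created by InitMetaProcedure above.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
          // One live region server, matching the ServerManager entry above.
          System.out.println("live servers: "
              + admin.getClusterMetrics().getLiveServerMetrics().size());
        }
      }
    }
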
2024-11-19T08:44:45,683 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:45,684 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T08:44:45,691 DEBUG [master/3ab37fa97a98:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T08:44:45,718 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.440sec 2024-11-19T08:44:45,719 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T08:44:45,721 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T08:44:45,722 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T08:44:45,723 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T08:44:45,724 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T08:44:45,725 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,34827,1732005882034-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:44:45,725 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,34827,1732005882034-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T08:44:45,733 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T08:44:45,735 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T08:44:45,735 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,34827,1732005882034-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:44:45,784 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@116576a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:44:45,789 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-19T08:44:45,789 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-19T08:44:45,794 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3ab37fa97a98,34827,-1 for getting cluster id 2024-11-19T08:44:45,798 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T08:44:45,808 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5b3012d5-f475-4e74-95b5-5192ae6e3658' 2024-11-19T08:44:45,812 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T08:44:45,813 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5b3012d5-f475-4e74-95b5-5192ae6e3658" 2024-11-19T08:44:45,816 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@120d12d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:44:45,816 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3ab37fa97a98,34827,-1] 2024-11-19T08:44:45,821 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T08:44:45,823 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:44:45,826 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47420, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T08:44:45,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@151fab88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:44:45,831 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:44:45,840 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,40001,1732005883022, seqNum=-1] 2024-11-19T08:44:45,841 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:44:45,844 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:44:45,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:45,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:44:45,877 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T08:44:45,881 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T08:44:45,886 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3ab37fa97a98,34827,1732005882034 2024-11-19T08:44:45,909 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2bfb5e00 2024-11-19T08:44:45,910 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T08:44:45,917 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47434, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T08:44:45,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T08:44:45,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
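
The "set balanceSwitch=false" entry above is the master handling a client request to disable the load balancer. Assuming the HBase 2+ Admin API, the equivalent client call looks roughly like this (the return value is the previous balancer state):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerSwitchExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Turn the balancer off; the second argument asks for a synchronous switch.
          boolean previous = admin.balancerSwitch(false, true);
          System.out.println("balancer was previously " + (previous ? "on" : "off"));
        }
      }
    }
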
2024-11-19T08:44:45,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:44:45,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-19T08:44:45,938 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T08:44:45,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-19T08:44:45,949 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:45,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T08:44:45,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:44:46,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741835_1011 (size=389) 2024-11-19T08:44:46,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741835_1011 (size=389) 2024-11-19T08:44:46,405 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2aa178a41813493221279ba2be8d3a33, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad 2024-11-19T08:44:46,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741836_1012 (size=72) 2024-11-19T08:44:46,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741836_1012 (size=72) 2024-11-19T08:44:46,417 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:44:46,417 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 2aa178a41813493221279ba2be8d3a33, disabling compactions & flushes 2024-11-19T08:44:46,417 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:44:46,417 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:44:46,417 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. after waiting 0 ms 2024-11-19T08:44:46,417 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:44:46,417 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:44:46,418 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2aa178a41813493221279ba2be8d3a33: Waiting for close lock at 1732005886417Disabling compacts and flushes for region at 1732005886417Disabling writes for close at 1732005886417Writing region close event to WAL at 1732005886417Closed at 1732005886417 2024-11-19T08:44:46,420 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T08:44:46,425 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732005886420"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732005886420"}]},"ts":"1732005886420"} 2024-11-19T08:44:46,430 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
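
The CreateTableProcedure above is what a client-side createTable call triggers. A hedged sketch of that call, with the column family settings taken from the descriptor printed in the log (one version, ROW bloom filter, 64 KB blocks) and the deliberately tiny MAX_FILESIZE/MEMSTORE_FLUSHSIZE that TableDescriptorChecker warned about; the connection boilerplate is assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              // Tiny limits that force frequent splits/flushes, as the warnings above note.
              .setMaxFileSize(786432)
              .setMemStoreFlushSize(8192)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .setBlocksize(64 * 1024)
                  .build())
              .build());
        }
      }
    }
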
2024-11-19T08:44:46,432 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T08:44:46,435 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732005886432"}]},"ts":"1732005886432"} 2024-11-19T08:44:46,440 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-19T08:44:46,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2aa178a41813493221279ba2be8d3a33, ASSIGN}] 2024-11-19T08:44:46,445 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2aa178a41813493221279ba2be8d3a33, ASSIGN 2024-11-19T08:44:46,446 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2aa178a41813493221279ba2be8d3a33, ASSIGN; state=OFFLINE, location=3ab37fa97a98,40001,1732005883022; forceNewPlan=false, retain=false 2024-11-19T08:44:46,598 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2aa178a41813493221279ba2be8d3a33, regionState=OPENING, regionLocation=3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:46,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2aa178a41813493221279ba2be8d3a33, ASSIGN because future has completed 2024-11-19T08:44:46,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2aa178a41813493221279ba2be8d3a33, server=3ab37fa97a98,40001,1732005883022}] 2024-11-19T08:44:46,766 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 
2024-11-19T08:44:46,766 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2aa178a41813493221279ba2be8d3a33, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:44:46,766 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,767 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:44:46,767 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,767 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,769 INFO [StoreOpener-2aa178a41813493221279ba2be8d3a33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,772 INFO [StoreOpener-2aa178a41813493221279ba2be8d3a33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2aa178a41813493221279ba2be8d3a33 columnFamilyName info 2024-11-19T08:44:46,772 DEBUG [StoreOpener-2aa178a41813493221279ba2be8d3a33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:44:46,773 INFO [StoreOpener-2aa178a41813493221279ba2be8d3a33-1 {}] regionserver.HStore(327): Store=2aa178a41813493221279ba2be8d3a33/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:44:46,774 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,775 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,777 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,778 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,778 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,781 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,784 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:44:46,785 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2aa178a41813493221279ba2be8d3a33; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742621, jitterRate=-0.05570860207080841}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T08:44:46,785 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:44:46,786 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2aa178a41813493221279ba2be8d3a33: Running coprocessor pre-open hook at 1732005886767Writing region info on filesystem at 1732005886767Initializing all the Stores at 1732005886769 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005886769Cleaning up temporary data from old regions at 1732005886778 (+9 ms)Running coprocessor post-open hooks at 1732005886785 (+7 ms)Region opened successfully at 1732005886786 (+1 ms) 2024-11-19T08:44:46,788 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33., pid=6, masterSystemTime=1732005886760 2024-11-19T08:44:46,792 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:44:46,792 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:44:46,794 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2aa178a41813493221279ba2be8d3a33, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,40001,1732005883022 2024-11-19T08:44:46,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2aa178a41813493221279ba2be8d3a33, server=3ab37fa97a98,40001,1732005883022 because future has completed 2024-11-19T08:44:46,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T08:44:46,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2aa178a41813493221279ba2be8d3a33, server=3ab37fa97a98,40001,1732005883022 in 197 msec 2024-11-19T08:44:46,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T08:44:46,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2aa178a41813493221279ba2be8d3a33, ASSIGN in 364 msec 2024-11-19T08:44:46,812 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T08:44:46,813 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732005886812"}]},"ts":"1732005886812"} 2024-11-19T08:44:46,817 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-19T08:44:46,820 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T08:44:46,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 891 msec 2024-11-19T08:44:50,891 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T08:44:50,960 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T08:44:50,961 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-19T08:44:52,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:44:52,494 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T08:44:52,496 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T08:44:52,496 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T08:44:52,497 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:44:52,497 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T08:44:52,497 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T08:44:52,497 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T08:44:55,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34827 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:44:55,983 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-19T08:44:55,987 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-19T08:44:55,996 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-19T08:44:55,997 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 
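
With the table online, the test writes roughly 1 KB cells to the 'info' family (row0001 and friends, with an empty qualifier) and looks up the region location, which is what the AsyncNonMetaRegionLocator and MemStoreFlusher entries below reflect. A sketch of the equivalent client calls, assuming the standard Table/RegionLocator API; the value size is only an approximation of the cells seen in the flush:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndLocateExample {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn);
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Cell shaped like the ones in the log: row0001, family 'info', empty qualifier, ~1 KB value.
          table.put(new Put(Bytes.toBytes("row0001"))
              .addColumn(Bytes.toBytes("info"), new byte[0], new byte[1024]));
          // Region lookup like the AsyncNonMetaRegionLocator entry further down.
          System.out.println(locator.getRegionLocation(Bytes.toBytes("row0001")));
        }
      }
    }
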
2024-11-19T08:44:55,998 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005895997 2024-11-19T08:44:56,007 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:44:56,007 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:44:56,008 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:44:56,008 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:44:56,008 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:44:56,008 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005884856 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005895997 2024-11-19T08:44:56,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741833_1009 (size=451) 2024-11-19T08:44:56,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741833_1009 (size=451) 2024-11-19T08:44:56,027 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44247:44247),(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-19T08:44:56,027 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005884856 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs/3ab37fa97a98%2C40001%2C1732005883022.1732005884856 2024-11-19T08:44:56,036 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33., hostname=3ab37fa97a98,40001,1732005883022, seqNum=2] 2024-11-19T08:45:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40001 {}] regionserver.HRegion(8855): Flush requested on 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:45:08,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2aa178a41813493221279ba2be8d3a33 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:45:08,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/ed5645d3cd0746bcb63f6f0b5088577e is 1080, key is row0001/info:/1732005896039/Put/seqid=0 2024-11-19T08:45:08,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741838_1014 (size=12509) 2024-11-19T08:45:08,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741838_1014 (size=12509) 2024-11-19T08:45:08,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/ed5645d3cd0746bcb63f6f0b5088577e 2024-11-19T08:45:08,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/ed5645d3cd0746bcb63f6f0b5088577e as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e 2024-11-19T08:45:08,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T08:45:08,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2aa178a41813493221279ba2be8d3a33 in 132ms, sequenceid=11, compaction requested=false 2024-11-19T08:45:08,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2aa178a41813493221279ba2be8d3a33: 2024-11-19T08:45:10,977 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T08:45:16,099 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005916099 2024-11-19T08:45:16,309 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:16,309 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:16,310 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:16,310 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:16,310 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:16,310 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:16,311 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005895997 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005916099 2024-11-19T08:45:16,312 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40295:40295),(127.0.0.1/127.0.0.1:44247:44247)] 2024-11-19T08:45:16,313 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005895997 is not closed yet, will try archiving it next time 2024-11-19T08:45:16,316 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741837_1013 (size=12399) 2024-11-19T08:45:16,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741837_1013 (size=12399) 2024-11-19T08:45:16,517 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:18,721 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:20,925 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:23,129 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:23,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40001 {}] regionserver.HRegion(8855): Flush requested on 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:45:23,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2aa178a41813493221279ba2be8d3a33 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:45:23,331 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:23,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/a81ac768799a4ad489cfd9ff4205b7dc is 1080, key is row0008/info:/1732005910085/Put/seqid=0 2024-11-19T08:45:23,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741840_1016 (size=12509) 2024-11-19T08:45:23,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741840_1016 (size=12509) 2024-11-19T08:45:23,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/a81ac768799a4ad489cfd9ff4205b7dc 2024-11-19T08:45:23,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/a81ac768799a4ad489cfd9ff4205b7dc as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/a81ac768799a4ad489cfd9ff4205b7dc 2024-11-19T08:45:23,403 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/a81ac768799a4ad489cfd9ff4205b7dc, entries=7, sequenceid=21, filesize=12.2 K 2024-11-19T08:45:23,605 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:23,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2aa178a41813493221279ba2be8d3a33 in 476ms, sequenceid=21, compaction requested=false 2024-11-19T08:45:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2aa178a41813493221279ba2be8d3a33: 2024-11-19T08:45:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-19T08:45:23,606 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:45:23,606 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e because midkey is the same as first or last row 2024-11-19T08:45:25,334 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:26,023 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T08:45:26,023 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
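
A few entries above, the flush is followed by a split-policy evaluation: the store's total size (~24.4 K) exceeds the check size (16.0 K), yet the split is rejected because the candidate midkey equals the first or last row of the store file. A simplified, self-contained sketch of that two-part decision (byte-array comparison only; not HBase's ConstantSizeRegionSplitPolicy/StoreUtils code, and the example row keys are hypothetical apart from row0001, which appears in the log):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class SplitCheckSketch {
      /** True when the store is big enough to consider splitting. */
      static boolean bigEnoughToSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
      }

      /** A midkey equal to the first or last row cannot yield two non-empty daughters. */
      static boolean isUsableMidkey(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        return midkey != null
            && !Arrays.equals(midkey, firstRow)
            && !Arrays.equals(midkey, lastRow);
      }

      public static void main(String[] args) {
        long sumSize = 25018;        // two 12.2 K hfiles, the ~24.4 K reported above
        long sizeToCheck = 16 * 1024; // 16.0 K, as reported above
        byte[] first = "row0001".getBytes(StandardCharsets.UTF_8);
        byte[] last = "row0014".getBytes(StandardCharsets.UTF_8); // hypothetical last row
        byte[] midkey = first;        // same as the first row, so the split is rejected
        System.out.println("size ok:       " + bigEnoughToSplit(sumSize, sizeToCheck));
        System.out.println("midkey usable: " + isUsableMidkey(midkey, first, last));
      }
    }
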
2024-11-19T08:45:27,583 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 243 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:27,589 WARN [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:27,591 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C40001%2C1732005883022:(num 1732005916099) roll requested 2024-11-19T08:45:27,592 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005927591 2024-11-19T08:45:27,803 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK]] 2024-11-19T08:45:27,804 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:27,804 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:27,804 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:27,804 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:27,804 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:27,805 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005916099 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005927591 2024-11-19T08:45:27,807 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44247:44247),(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-19T08:45:27,807 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005916099 is not closed yet, will try archiving it next time 2024-11-19T08:45:27,807 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005895997 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs/3ab37fa97a98%2C40001%2C1732005883022.1732005895997 2024-11-19T08:45:27,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741839_1015 (size=7739) 2024-11-19T08:45:27,808 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741839_1015 (size=7739) 2024-11-19T08:45:29,788 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:31,767 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2aa178a41813493221279ba2be8d3a33, had cached 0 bytes from a total of 25018 2024-11-19T08:45:31,993 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:34,199 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:36,404 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:38,406 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T08:45:38,407 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005938406 2024-11-19T08:45:40,977 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
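
The entries above and below show the two slow-sync triggers visible in this test: individual syncs around 200 ms are logged as "Slow sync cost", a roll is requested once enough slow syncs accumulate (count=8, threshold=5 earlier), and a roll is also requested when a single sync exceeds a hard time limit (5000 ms below). A minimal sketch of that kind of tracker; the 5000 ms limit and count threshold of 5 are taken from the log, while the 100 ms "slow" cutoff is an assumed default, and none of this is FSHLog's actual code:

    /** Tracks sync latencies and decides when a WAL roll should be requested. */
    public class SlowSyncTracker {
      private final long slowSyncMs;         // a sync slower than this counts as "slow" (assumed 100 ms)
      private final long rollOnSingleSyncMs; // one sync slower than this forces a roll (5000 ms in the log)
      private final int rollOnSlowCount;     // more slow syncs than this force a roll (5 in the log)
      private int slowCount;

      SlowSyncTracker(long slowSyncMs, long rollOnSingleSyncMs, int rollOnSlowCount) {
        this.slowSyncMs = slowSyncMs;
        this.rollOnSingleSyncMs = rollOnSingleSyncMs;
        this.rollOnSlowCount = rollOnSlowCount;
      }

      /** Returns true when a log roll should be requested for the observed sync time. */
      boolean onSync(long syncCostMs) {
        if (syncCostMs >= rollOnSingleSyncMs) {
          return true;                        // e.g. the 5057 ms sync reported below
        }
        if (syncCostMs >= slowSyncMs) {
          slowCount++;
        } else {
          slowCount = 0;
        }
        return slowCount > rollOnSlowCount;   // e.g. "count=8, threshold=5" above
      }

      public static void main(String[] args) {
        SlowSyncTracker tracker = new SlowSyncTracker(100, 5000, 5);
        long[] observedMs = {207, 201, 201, 201, 200, 201, 243, 209};
        for (long ms : observedMs) {
          System.out.println(ms + " ms -> roll requested: " + tracker.onSync(ms));
        }
      }
    }
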
2024-11-19T08:45:43,467 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5057 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:43,470 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5057 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:43,470 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C40001%2C1732005883022:(num 1732005938406) roll requested 2024-11-19T08:45:43,470 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:43,470 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:43,471 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:43,471 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:43,471 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:43,472 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005927591 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005938406 2024-11-19T08:45:43,474 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44247:44247),(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-19T08:45:43,474 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005927591 is not closed yet, will try archiving it next time 2024-11-19T08:45:43,475 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005943474 2024-11-19T08:45:43,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741841_1017 (size=4753) 2024-11-19T08:45:43,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741841_1017 (size=4753) 2024-11-19T08:45:48,529 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5051 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:48,529 WARN [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5051 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:48,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40001 {}] regionserver.HRegion(8855): Flush requested on 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:45:48,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2aa178a41813493221279ba2be8d3a33 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:45:48,537 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5055 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:48,537 WARN [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5055 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:50,532 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T08:45:53,585 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5052 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:53,585 WARN [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5052 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:53,585 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:53,585 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:53,586 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:53,586 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:53,586 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:53,586 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005938406 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005943474 2024-11-19T08:45:53,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741842_1018 (size=1569) 2024-11-19T08:45:53,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741842_1018 (size=1569) 2024-11-19T08:45:53,589 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:44247:44247),(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-19T08:45:53,589 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005938406 is not closed yet, will try archiving it next time 2024-11-19T08:45:53,589 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C40001%2C1732005883022:(num 1732005943474) roll requested 2024-11-19T08:45:53,590 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005953590 2024-11-19T08:45:53,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/9f882b6fe4934739bc1ed4a0a13dc2b8 is 1080, key is row0015/info:/1732005925132/Put/seqid=0 2024-11-19T08:45:53,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741844_1020 (size=12509) 2024-11-19T08:45:53,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741844_1020 (size=12509) 2024-11-19T08:45:53,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/9f882b6fe4934739bc1ed4a0a13dc2b8 2024-11-19T08:45:53,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/9f882b6fe4934739bc1ed4a0a13dc2b8 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/9f882b6fe4934739bc1ed4a0a13dc2b8 2024-11-19T08:45:53,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/9f882b6fe4934739bc1ed4a0a13dc2b8, entries=7, sequenceid=31, filesize=12.2 K 2024-11-19T08:45:58,643 INFO [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5024 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:58,644 WARN [FSHLog-0-hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad-prefix:3ab37fa97a98,40001,1732005883022 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5024 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:58,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2aa178a41813493221279ba2be8d3a33 in 10113ms, sequenceid=31, compaction requested=true 2024-11-19T08:45:58,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2aa178a41813493221279ba2be8d3a33: 2024-11-19T08:45:58,645 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-19T08:45:58,645 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:45:58,646 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e because midkey is the same as first or last row 2024-11-19T08:45:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2aa178a41813493221279ba2be8d3a33:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:45:58,650 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5058 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:58,651 WARN [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5058 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44347,DS-d5b2b5a0-a0c4-4063-8046-f0d8958d6210,DISK], DatanodeInfoWithStorage[127.0.0.1:43377,DS-64f0e4f1-8925-4cfe-9da3-bf02d431108a,DISK]] 2024-11-19T08:45:58,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,652 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,652 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,652 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,652 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,653 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005943474 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005953590 2024-11-19T08:45:58,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:45:58,654 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:45:58,654 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44247:44247),(127.0.0.1/127.0.0.1:40295:40295)] 
2024-11-19T08:45:58,654 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005943474 is not closed yet, will try archiving it next time 2024-11-19T08:45:58,654 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005916099 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs/3ab37fa97a98%2C40001%2C1732005883022.1732005916099 2024-11-19T08:45:58,654 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C40001%2C1732005883022:(num 1732005958654) roll requested 2024-11-19T08:45:58,654 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005958654 2024-11-19T08:45:58,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741843_1019 (size=438) 2024-11-19T08:45:58,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741843_1019 (size=438) 2024-11-19T08:45:58,656 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005927591 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs/3ab37fa97a98%2C40001%2C1732005883022.1732005927591 2024-11-19T08:45:58,657 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:45:58,658 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005938406 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs/3ab37fa97a98%2C40001%2C1732005883022.1732005938406 2024-11-19T08:45:58,659 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.HStore(1541): 2aa178a41813493221279ba2be8d3a33/info is initiating minor compaction (all files) 2024-11-19T08:45:58,659 INFO [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2aa178a41813493221279ba2be8d3a33/info in TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 
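
The minor compaction that has just been selected rewrites the three flushed HFiles (each ~12.2 K) into the single 27.1 K file reported further below. Conceptually it is a merge of already-sorted inputs; a tiny, generic sketch of that merge step using sorted in-memory lists in place of HFiles (purely illustrative, not HBase's compactor, and the row keys are examples):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.PriorityQueue;

    /** Merges several sorted runs into one, the way a minor compaction merges HFiles. */
    public class MergeSortedRuns {
      static List<String> merge(List<List<String>> runs) {
        // Each heap entry is {runIndex, offsetWithinRun}, ordered by the current key.
        PriorityQueue<int[]> heap = new PriorityQueue<>(
            (a, b) -> runs.get(a[0]).get(a[1]).compareTo(runs.get(b[0]).get(b[1])));
        for (int i = 0; i < runs.size(); i++) {
          if (!runs.get(i).isEmpty()) {
            heap.add(new int[] {i, 0});
          }
        }
        List<String> merged = new ArrayList<>();
        while (!heap.isEmpty()) {
          int[] cur = heap.poll();
          List<String> run = runs.get(cur[0]);
          merged.add(run.get(cur[1]));
          if (cur[1] + 1 < run.size()) {
            heap.add(new int[] {cur[0], cur[1] + 1});
          }
        }
        return merged;
      }

      public static void main(String[] args) {
        // Three sorted "files", standing in for the three HFiles selected above.
        List<List<String>> runs = List.of(
            List.of("row0001", "row0004", "row0007"),
            List.of("row0008", "row0011", "row0014"),
            List.of("row0015", "row0018", "row0021"));
        System.out.println(merge(runs));
      }
    }
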
2024-11-19T08:45:58,660 INFO [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e, hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/a81ac768799a4ad489cfd9ff4205b7dc, hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/9f882b6fe4934739bc1ed4a0a13dc2b8] into tmpdir=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp, totalSize=36.6 K 2024-11-19T08:45:58,660 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005943474 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs/3ab37fa97a98%2C40001%2C1732005883022.1732005943474 2024-11-19T08:45:58,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,661 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,661 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,661 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,661 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,661 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005953590 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005958654 2024-11-19T08:45:58,661 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed5645d3cd0746bcb63f6f0b5088577e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732005896039 2024-11-19T08:45:58,662 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] compactions.Compactor(225): Compacting a81ac768799a4ad489cfd9ff4205b7dc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732005910085 2024-11-19T08:45:58,663 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f882b6fe4934739bc1ed4a0a13dc2b8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732005925132 2024-11-19T08:45:58,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741845_1021 (size=93) 2024-11-19T08:45:58,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741845_1021 (size=93) 2024-11-19T08:45:58,664 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005953590 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs/3ab37fa97a98%2C40001%2C1732005883022.1732005953590 2024-11-19T08:45:58,668 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44247:44247),(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-19T08:45:58,668 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C40001%2C1732005883022.1732005958668 2024-11-19T08:45:58,681 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,681 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,681 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:45:58,682 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005958654 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005958668 2024-11-19T08:45:58,682 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44247:44247),(127.0.0.1/127.0.0.1:40295:40295)] 2024-11-19T08:45:58,682 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/WALs/3ab37fa97a98,40001,1732005883022/3ab37fa97a98%2C40001%2C1732005883022.1732005958654 is not closed yet, will try archiving it next time 2024-11-19T08:45:58,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741846_1022 (size=1258) 2024-11-19T08:45:58,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741846_1022 (size=1258) 2024-11-19T08:45:58,695 INFO [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2aa178a41813493221279ba2be8d3a33#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:45:58,696 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/b6bfe8be2cb54606a286da1a8c5c4084 is 1080, key is row0001/info:/1732005896039/Put/seqid=0 2024-11-19T08:45:58,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741848_1024 (size=27710) 2024-11-19T08:45:58,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741848_1024 (size=27710) 2024-11-19T08:45:58,714 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/b6bfe8be2cb54606a286da1a8c5c4084 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/b6bfe8be2cb54606a286da1a8c5c4084 2024-11-19T08:45:58,731 INFO [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2aa178a41813493221279ba2be8d3a33/info of 2aa178a41813493221279ba2be8d3a33 into b6bfe8be2cb54606a286da1a8c5c4084(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:45:58,731 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2aa178a41813493221279ba2be8d3a33: 2024-11-19T08:45:58,733 INFO [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33., storeName=2aa178a41813493221279ba2be8d3a33/info, priority=13, startTime=1732005958649; duration=0sec 2024-11-19T08:45:58,733 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T08:45:58,733 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:45:58,733 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/b6bfe8be2cb54606a286da1a8c5c4084 because midkey is the same as first or last row 2024-11-19T08:45:58,733 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T08:45:58,733 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:45:58,734 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/b6bfe8be2cb54606a286da1a8c5c4084 because midkey is the same as first or last row 2024-11-19T08:45:58,734 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T08:45:58,734 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:45:58,734 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/b6bfe8be2cb54606a286da1a8c5c4084 because midkey is the same as first or last row 2024-11-19T08:45:58,734 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:45:58,734 DEBUG [RS:0;3ab37fa97a98:40001-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2aa178a41813493221279ba2be8d3a33:info 2024-11-19T08:46:10,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40001 {}] regionserver.HRegion(8855): Flush requested on 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:46:10,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2aa178a41813493221279ba2be8d3a33 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:46:10,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/2db1cf07a00f4a2b9a9ab60b59e0e64e is 1080, key is row0022/info:/1732005958669/Put/seqid=0 2024-11-19T08:46:10,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741849_1025 (size=12509) 2024-11-19T08:46:10,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741849_1025 (size=12509) 2024-11-19T08:46:10,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/2db1cf07a00f4a2b9a9ab60b59e0e64e 2024-11-19T08:46:10,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/2db1cf07a00f4a2b9a9ab60b59e0e64e as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/2db1cf07a00f4a2b9a9ab60b59e0e64e 2024-11-19T08:46:10,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/2db1cf07a00f4a2b9a9ab60b59e0e64e, entries=7, sequenceid=42, filesize=12.2 K 2024-11-19T08:46:10,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2aa178a41813493221279ba2be8d3a33 in 46ms, sequenceid=42, compaction requested=false 2024-11-19T08:46:10,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2aa178a41813493221279ba2be8d3a33: 2024-11-19T08:46:10,741 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-19T08:46:10,741 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:46:10,741 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/b6bfe8be2cb54606a286da1a8c5c4084 because midkey is the same as first or last row 2024-11-19T08:46:10,977 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T08:46:16,767 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2aa178a41813493221279ba2be8d3a33, had cached 0 bytes from a total of 40219 2024-11-19T08:46:18,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T08:46:18,707 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
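
The shutdown that starts here matches the stack traces that follow: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(). A typical JUnit teardown for an HBaseTestingUtil-based test looks roughly like this (a sketch of the common pattern, not the test's exact source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class MiniClusterTeardownSketch {
      // In the real test the utility is created and started in a @Before/@BeforeClass method.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Stops the region servers, master, ZooKeeper and the mini DFS cluster,
        // producing a shutdown sequence like the one recorded below.
        testUtil.shutdownMiniCluster();
      }
    }
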
2024-11-19T08:46:18,707 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:46:18,712 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:18,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:18,713 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T08:46:18,714 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T08:46:18,714 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2134490824, stopped=false 2024-11-19T08:46:18,714 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3ab37fa97a98,34827,1732005882034 2024-11-19T08:46:18,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:18,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:18,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:18,730 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:46:18,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:18,730 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T08:46:18,731 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:46:18,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:18,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:18,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:18,731 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,40001,1732005883022' ***** 2024-11-19T08:46:18,731 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:46:18,732 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:46:18,732 INFO [RS:0;3ab37fa97a98:40001 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T08:46:18,732 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:46:18,732 INFO [RS:0;3ab37fa97a98:40001 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:46:18,733 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(3091): Received CLOSE for 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:46:18,733 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,40001,1732005883022 2024-11-19T08:46:18,733 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:46:18,734 INFO [RS:0;3ab37fa97a98:40001 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3ab37fa97a98:40001. 
2024-11-19T08:46:18,734 DEBUG [RS:0;3ab37fa97a98:40001 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:46:18,734 DEBUG [RS:0;3ab37fa97a98:40001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:18,734 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2aa178a41813493221279ba2be8d3a33, disabling compactions & flushes 2024-11-19T08:46:18,734 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T08:46:18,734 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:46:18,734 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:46:18,734 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:46:18,734 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T08:46:18,734 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. after waiting 0 ms 2024-11-19T08:46:18,734 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T08:46:18,734 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 
2024-11-19T08:46:18,735 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2aa178a41813493221279ba2be8d3a33 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-19T08:46:18,735 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T08:46:18,735 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1325): Online Regions={2aa178a41813493221279ba2be8d3a33=TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T08:46:18,735 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:46:18,735 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:46:18,735 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:46:18,735 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:46:18,735 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2aa178a41813493221279ba2be8d3a33 2024-11-19T08:46:18,735 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:46:18,736 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-19T08:46:18,741 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/4a4f6e0b945a4412b637fafa41ce8474 is 1080, key is row0029/info:/1732005972696/Put/seqid=0 2024-11-19T08:46:18,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741850_1026 (size=8193) 2024-11-19T08:46:18,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741850_1026 (size=8193) 2024-11-19T08:46:18,749 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/4a4f6e0b945a4412b637fafa41ce8474 2024-11-19T08:46:18,759 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/.tmp/info/4a4f6e0b945a4412b637fafa41ce8474 as 
hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/4a4f6e0b945a4412b637fafa41ce8474 2024-11-19T08:46:18,760 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/info/e99f744d58304fb1b2a7361be1e84865 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33./info:regioninfo/1732005886793/Put/seqid=0 2024-11-19T08:46:18,769 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/4a4f6e0b945a4412b637fafa41ce8474, entries=3, sequenceid=48, filesize=8.0 K 2024-11-19T08:46:18,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741851_1027 (size=7016) 2024-11-19T08:46:18,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741851_1027 (size=7016) 2024-11-19T08:46:18,771 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 2aa178a41813493221279ba2be8d3a33 in 36ms, sequenceid=48, compaction requested=true 2024-11-19T08:46:18,771 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e, hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/a81ac768799a4ad489cfd9ff4205b7dc, hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/9f882b6fe4934739bc1ed4a0a13dc2b8] to archive 2024-11-19T08:46:18,775 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T08:46:18,778 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/archive/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/ed5645d3cd0746bcb63f6f0b5088577e 2024-11-19T08:46:18,778 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T08:46:18,778 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T08:46:18,780 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/a81ac768799a4ad489cfd9ff4205b7dc to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/archive/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/a81ac768799a4ad489cfd9ff4205b7dc 2024-11-19T08:46:18,782 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/9f882b6fe4934739bc1ed4a0a13dc2b8 to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/archive/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/info/9f882b6fe4934739bc1ed4a0a13dc2b8 2024-11-19T08:46:18,794 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3ab37fa97a98:34827 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T08:46:18,799 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ed5645d3cd0746bcb63f6f0b5088577e=12509, a81ac768799a4ad489cfd9ff4205b7dc=12509, 9f882b6fe4934739bc1ed4a0a13dc2b8=12509] 2024-11-19T08:46:18,805 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/default/TestLogRolling-testSlowSyncLogRolling/2aa178a41813493221279ba2be8d3a33/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-19T08:46:18,807 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 2024-11-19T08:46:18,808 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2aa178a41813493221279ba2be8d3a33: Waiting for close lock at 1732005978733Running coprocessor pre-close hooks at 1732005978734 (+1 ms)Disabling compacts and flushes for region at 1732005978734Disabling writes for close at 1732005978734Obtaining lock to block concurrent updates at 1732005978735 (+1 ms)Preparing flush snapshotting stores in 2aa178a41813493221279ba2be8d3a33 at 1732005978735Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732005978735Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. at 1732005978736 (+1 ms)Flushing 2aa178a41813493221279ba2be8d3a33/info: creating writer at 1732005978736Flushing 2aa178a41813493221279ba2be8d3a33/info: appending metadata at 1732005978741 (+5 ms)Flushing 2aa178a41813493221279ba2be8d3a33/info: closing flushed file at 1732005978741Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2435d53d: reopening flushed file at 1732005978758 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 2aa178a41813493221279ba2be8d3a33 in 36ms, sequenceid=48, compaction requested=true at 1732005978771 (+13 ms)Writing region close event to WAL at 1732005978799 (+28 ms)Running coprocessor post-close hooks at 1732005978806 (+7 ms)Closed at 1732005978807 (+1 ms) 2024-11-19T08:46:18,808 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732005885919.2aa178a41813493221279ba2be8d3a33. 
2024-11-19T08:46:18,936 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T08:46:19,136 DEBUG [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T08:46:19,172 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/info/e99f744d58304fb1b2a7361be1e84865 2024-11-19T08:46:19,199 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/ns/492e5615f3a140a59737ffce10a9d3d5 is 43, key is default/ns:d/1732005885620/Put/seqid=0 2024-11-19T08:46:19,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741852_1028 (size=5153) 2024-11-19T08:46:19,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741852_1028 (size=5153) 2024-11-19T08:46:19,211 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/ns/492e5615f3a140a59737ffce10a9d3d5 2024-11-19T08:46:19,238 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/table/94e615599c474dffa09a613c691afb41 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732005886812/Put/seqid=0 2024-11-19T08:46:19,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741853_1029 (size=5396) 2024-11-19T08:46:19,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741853_1029 (size=5396) 2024-11-19T08:46:19,249 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/table/94e615599c474dffa09a613c691afb41 2024-11-19T08:46:19,260 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/info/e99f744d58304fb1b2a7361be1e84865 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/info/e99f744d58304fb1b2a7361be1e84865 2024-11-19T08:46:19,270 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/info/e99f744d58304fb1b2a7361be1e84865, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T08:46:19,272 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/ns/492e5615f3a140a59737ffce10a9d3d5 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/ns/492e5615f3a140a59737ffce10a9d3d5 2024-11-19T08:46:19,281 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/ns/492e5615f3a140a59737ffce10a9d3d5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T08:46:19,283 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/.tmp/table/94e615599c474dffa09a613c691afb41 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/table/94e615599c474dffa09a613c691afb41 2024-11-19T08:46:19,292 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/table/94e615599c474dffa09a613c691afb41, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T08:46:19,294 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 559ms, sequenceid=11, compaction requested=false 2024-11-19T08:46:19,305 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T08:46:19,306 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:46:19,306 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:46:19,306 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732005978735Running coprocessor pre-close hooks at 1732005978735Disabling compacts and flushes for region at 1732005978735Disabling writes for close at 1732005978735Obtaining lock to block concurrent updates at 1732005978736 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732005978736Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732005978736Flushing stores of hbase:meta,,1.1588230740 at 1732005978737 (+1 ms)Flushing 1588230740/info: creating writer at 
1732005978737Flushing 1588230740/info: appending metadata at 1732005978759 (+22 ms)Flushing 1588230740/info: closing flushed file at 1732005978759Flushing 1588230740/ns: creating writer at 1732005979183 (+424 ms)Flushing 1588230740/ns: appending metadata at 1732005979199 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732005979199Flushing 1588230740/table: creating writer at 1732005979219 (+20 ms)Flushing 1588230740/table: appending metadata at 1732005979237 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732005979237Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45f30739: reopening flushed file at 1732005979259 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b3b0c64: reopening flushed file at 1732005979270 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41fde5db: reopening flushed file at 1732005979281 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 559ms, sequenceid=11, compaction requested=false at 1732005979294 (+13 ms)Writing region close event to WAL at 1732005979299 (+5 ms)Running coprocessor post-close hooks at 1732005979306 (+7 ms)Closed at 1732005979306 2024-11-19T08:46:19,306 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T08:46:19,336 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,40001,1732005883022; all regions closed. 2024-11-19T08:46:19,338 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,338 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,339 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,339 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,339 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741834_1010 (size=3066) 2024-11-19T08:46:19,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741834_1010 (size=3066) 2024-11-19T08:46:19,347 DEBUG [RS:0;3ab37fa97a98:40001 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs 2024-11-19T08:46:19,347 INFO [RS:0;3ab37fa97a98:40001 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C40001%2C1732005883022.meta:.meta(num 1732005885343) 2024-11-19T08:46:19,348 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,348 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,348 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,348 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:19,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741847_1023 (size=12695) 2024-11-19T08:46:19,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741847_1023 (size=12695) 2024-11-19T08:46:19,356 DEBUG [RS:0;3ab37fa97a98:40001 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to 
/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/oldWALs 2024-11-19T08:46:19,356 INFO [RS:0;3ab37fa97a98:40001 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C40001%2C1732005883022:(num 1732005958668) 2024-11-19T08:46:19,356 DEBUG [RS:0;3ab37fa97a98:40001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:19,356 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:46:19,356 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:46:19,356 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T08:46:19,356 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:46:19,356 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:46:19,357 INFO [RS:0;3ab37fa97a98:40001 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40001 2024-11-19T08:46:19,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,40001,1732005883022 2024-11-19T08:46:19,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:46:19,405 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:46:19,406 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,40001,1732005883022] 2024-11-19T08:46:19,424 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,40001,1732005883022 already deleted, retry=false 2024-11-19T08:46:19,424 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,40001,1732005883022 expired; onlineServers=0 2024-11-19T08:46:19,424 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3ab37fa97a98,34827,1732005882034' ***** 2024-11-19T08:46:19,424 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T08:46:19,425 INFO [M:0;3ab37fa97a98:34827 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:46:19,425 INFO [M:0;3ab37fa97a98:34827 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:46:19,425 DEBUG [M:0;3ab37fa97a98:34827 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T08:46:19,425 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T08:46:19,425 DEBUG [M:0;3ab37fa97a98:34827 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T08:46:19,425 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005884507 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005884507,5,FailOnTimeoutGroup] 2024-11-19T08:46:19,425 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005884501 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005884501,5,FailOnTimeoutGroup] 2024-11-19T08:46:19,426 INFO [M:0;3ab37fa97a98:34827 {}] hbase.ChoreService(370): Chore service for: master/3ab37fa97a98:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T08:46:19,426 INFO [M:0;3ab37fa97a98:34827 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:46:19,426 DEBUG [M:0;3ab37fa97a98:34827 {}] master.HMaster(1795): Stopping service threads 2024-11-19T08:46:19,426 INFO [M:0;3ab37fa97a98:34827 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T08:46:19,426 INFO [M:0;3ab37fa97a98:34827 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:46:19,427 INFO [M:0;3ab37fa97a98:34827 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T08:46:19,427 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T08:46:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T08:46:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:19,432 DEBUG [M:0;3ab37fa97a98:34827 {}] zookeeper.ZKUtil(347): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T08:46:19,433 WARN [M:0;3ab37fa97a98:34827 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T08:46:19,433 INFO [M:0;3ab37fa97a98:34827 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/.lastflushedseqids 2024-11-19T08:46:19,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741854_1030 (size=130) 2024-11-19T08:46:19,446 INFO [M:0;3ab37fa97a98:34827 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T08:46:19,446 INFO [M:0;3ab37fa97a98:34827 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T08:46:19,446 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:46:19,446 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:19,446 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:19,446 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:46:19,446 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:19,447 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-19T08:46:19,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741854_1030 (size=130) 2024-11-19T08:46:19,470 DEBUG [M:0;3ab37fa97a98:34827 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c9be1a9a25ea420aa20e99f2900ba585 is 82, key is hbase:meta,,1/info:regioninfo/1732005885443/Put/seqid=0 2024-11-19T08:46:19,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741855_1031 (size=5672) 2024-11-19T08:46:19,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741855_1031 (size=5672) 2024-11-19T08:46:19,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:46:19,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40001-0x101538df7ab0001, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:46:19,517 INFO [RS:0;3ab37fa97a98:40001 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:46:19,517 INFO [RS:0;3ab37fa97a98:40001 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,40001,1732005883022; zookeeper connection closed. 
2024-11-19T08:46:19,518 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1e05bd64 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1e05bd64 2024-11-19T08:46:19,518 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T08:46:19,880 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c9be1a9a25ea420aa20e99f2900ba585 2024-11-19T08:46:19,907 DEBUG [M:0;3ab37fa97a98:34827 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9002fde44d94b8baefab4472af02f34 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732005886824/Put/seqid=0 2024-11-19T08:46:19,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741856_1032 (size=6248) 2024-11-19T08:46:19,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741856_1032 (size=6248) 2024-11-19T08:46:19,914 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9002fde44d94b8baefab4472af02f34 2024-11-19T08:46:19,920 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a9002fde44d94b8baefab4472af02f34 2024-11-19T08:46:19,938 DEBUG [M:0;3ab37fa97a98:34827 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3234b7116e8430b93a38dc6508b67a8 is 69, key is 3ab37fa97a98,40001,1732005883022/rs:state/1732005884585/Put/seqid=0 2024-11-19T08:46:19,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741857_1033 (size=5156) 2024-11-19T08:46:19,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741857_1033 (size=5156) 2024-11-19T08:46:19,948 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3234b7116e8430b93a38dc6508b67a8 2024-11-19T08:46:19,974 DEBUG [M:0;3ab37fa97a98:34827 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1efe75a580754d1d8ea192a5f11f9bb1 is 52, key is load_balancer_on/state:d/1732005885874/Put/seqid=0 2024-11-19T08:46:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43377 is added to blk_1073741858_1034 (size=5056) 2024-11-19T08:46:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741858_1034 (size=5056) 2024-11-19T08:46:19,983 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1efe75a580754d1d8ea192a5f11f9bb1 2024-11-19T08:46:19,990 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c9be1a9a25ea420aa20e99f2900ba585 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c9be1a9a25ea420aa20e99f2900ba585 2024-11-19T08:46:19,999 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c9be1a9a25ea420aa20e99f2900ba585, entries=8, sequenceid=59, filesize=5.5 K 2024-11-19T08:46:20,001 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9002fde44d94b8baefab4472af02f34 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a9002fde44d94b8baefab4472af02f34 2024-11-19T08:46:20,009 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a9002fde44d94b8baefab4472af02f34 2024-11-19T08:46:20,010 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a9002fde44d94b8baefab4472af02f34, entries=6, sequenceid=59, filesize=6.1 K 2024-11-19T08:46:20,011 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3234b7116e8430b93a38dc6508b67a8 as hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3234b7116e8430b93a38dc6508b67a8 2024-11-19T08:46:20,019 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3234b7116e8430b93a38dc6508b67a8, entries=1, sequenceid=59, filesize=5.0 K 2024-11-19T08:46:20,021 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1efe75a580754d1d8ea192a5f11f9bb1 as 
hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1efe75a580754d1d8ea192a5f11f9bb1 2024-11-19T08:46:20,027 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1efe75a580754d1d8ea192a5f11f9bb1, entries=1, sequenceid=59, filesize=4.9 K 2024-11-19T08:46:20,029 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 582ms, sequenceid=59, compaction requested=false 2024-11-19T08:46:20,031 INFO [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:20,031 DEBUG [M:0;3ab37fa97a98:34827 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732005979446Disabling compacts and flushes for region at 1732005979446Disabling writes for close at 1732005979446Obtaining lock to block concurrent updates at 1732005979447 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732005979447Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1732005979447Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732005979448 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732005979448Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732005979469 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732005979469Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732005979889 (+420 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732005979906 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732005979906Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732005979920 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732005979938 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732005979938Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732005979955 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732005979973 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732005979973Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40f20219: reopening flushed file at 1732005979989 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4603886a: reopening flushed file at 1732005979999 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48047b30: reopening flushed file at 1732005980010 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45c50669: reopening flushed file at 1732005980019 (+9 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 582ms, sequenceid=59, compaction requested=false at 1732005980029 (+10 ms)Writing region close event to WAL at 1732005980030 (+1 ms)Closed at 1732005980030 2024-11-19T08:46:20,035 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:20,035 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:20,035 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:20,035 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:20,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:20,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44347 is added to blk_1073741830_1006 (size=27985) 2024-11-19T08:46:20,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43377 is added to blk_1073741830_1006 (size=27985) 2024-11-19T08:46:20,038 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:46:20,039 INFO [M:0;3ab37fa97a98:34827 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T08:46:20,039 INFO [M:0;3ab37fa97a98:34827 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34827 2024-11-19T08:46:20,039 INFO [M:0;3ab37fa97a98:34827 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:46:20,188 INFO [M:0;3ab37fa97a98:34827 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:46:20,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:46:20,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34827-0x101538df7ab0000, quorum=127.0.0.1:58282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:46:20,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:20,197 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:20,198 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:20,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:20,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:20,201 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:46:20,201 WARN [BP-1381190410-172.17.0.2-1732005877848 heartbeating to localhost/127.0.0.1:34933 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:20,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:20,201 WARN [BP-1381190410-172.17.0.2-1732005877848 heartbeating to localhost/127.0.0.1:34933 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1381190410-172.17.0.2-1732005877848 (Datanode Uuid 07ab3d2f-431a-46c2-8ad3-da62006b1003) service to localhost/127.0.0.1:34933 2024-11-19T08:46:20,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data3/current/BP-1381190410-172.17.0.2-1732005877848 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:20,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data4/current/BP-1381190410-172.17.0.2-1732005877848 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:20,203 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:20,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:20,206 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:20,206 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:20,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:20,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:20,208 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:46:20,208 WARN [BP-1381190410-172.17.0.2-1732005877848 heartbeating to localhost/127.0.0.1:34933 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:20,208 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:20,208 WARN [BP-1381190410-172.17.0.2-1732005877848 heartbeating to localhost/127.0.0.1:34933 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1381190410-172.17.0.2-1732005877848 (Datanode Uuid 767fce67-0218-4400-aa28-1d407d9526c8) service to localhost/127.0.0.1:34933 2024-11-19T08:46:20,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data1/current/BP-1381190410-172.17.0.2-1732005877848 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:20,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/cluster_627d3e3e-a1f0-5f83-8289-f2151de6d0af/data/data2/current/BP-1381190410-172.17.0.2-1732005877848 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:20,209 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:20,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:46:20,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:20,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:20,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:20,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:20,231 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T08:46:20,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T08:46:20,277 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3066d428 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34933 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:34933 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/3ab37fa97a98:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-3-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34933 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34933 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/3ab37fa97a98:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34933 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34933 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34933 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34933 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/3ab37fa97a98:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/3ab37fa97a98:0.leaseChecker 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=287 (was 266) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6418 (was 7144) 2024-11-19T08:46:20,285 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=287, ProcessCount=11, AvailableMemoryMB=6418 2024-11-19T08:46:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T08:46:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.log.dir so I do NOT create it in target/test-data/b324852b-78c6-c383-386f-50842ddd008a 2024-11-19T08:46:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f56cc966-f4dd-61d9-5389-f693acf769d6/hadoop.tmp.dir so I do NOT create it in target/test-data/b324852b-78c6-c383-386f-50842ddd008a 2024-11-19T08:46:20,286 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b, deleteOnExit=true 2024-11-19T08:46:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T08:46:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/test.cache.data in system properties and HBase conf 2024-11-19T08:46:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.tmp.dir in system properties and 
HBase conf 2024-11-19T08:46:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir in system properties and HBase conf 2024-11-19T08:46:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T08:46:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T08:46:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T08:46:20,287 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T08:46:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:46:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:46:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 
2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/nfs.dump.dir in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/java.io.tmpdir in system properties and HBase conf 2024-11-19T08:46:20,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:46:20,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T08:46:20,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T08:46:20,304 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:46:20,561 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:20,568 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:20,572 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:20,572 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:20,573 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:46:20,573 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:20,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ea6e47a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:20,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@124e4130{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:20,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6db7bfac{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/java.io.tmpdir/jetty-localhost-41859-hadoop-hdfs-3_4_1-tests_jar-_-any-9293880655202712885/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:46:20,700 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f2bc681{HTTP/1.1, (http/1.1)}{localhost:41859} 2024-11-19T08:46:20,700 INFO [Time-limited test {}] server.Server(415): Started @105228ms 2024-11-19T08:46:20,704 INFO [regionserver/3ab37fa97a98:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:46:20,717 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:46:20,937 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:20,941 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:20,943 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:20,943 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:20,943 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:46:20,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33e82987{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:20,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3590efb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:21,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2377404e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/java.io.tmpdir/jetty-localhost-34049-hadoop-hdfs-3_4_1-tests_jar-_-any-18064428706792192879/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:21,054 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d659f5b{HTTP/1.1, (http/1.1)}{localhost:34049} 2024-11-19T08:46:21,055 INFO [Time-limited test {}] server.Server(415): Started @105582ms 2024-11-19T08:46:21,056 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:46:21,104 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:21,109 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:21,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:21,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:21,110 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:46:21,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fcb1c4b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:21,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b589a7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:21,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32b5a1b3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/java.io.tmpdir/jetty-localhost-37985-hadoop-hdfs-3_4_1-tests_jar-_-any-14990831149507171918/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:21,220 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76b25f02{HTTP/1.1, (http/1.1)}{localhost:37985} 2024-11-19T08:46:21,220 INFO [Time-limited test {}] server.Server(415): Started @105748ms 2024-11-19T08:46:21,222 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:46:21,840 WARN [Thread-451 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data1/current/BP-1390567527-172.17.0.2-1732005980317/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:21,840 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data2/current/BP-1390567527-172.17.0.2-1732005980317/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:21,862 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:21,866 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b91765a72abb224 with lease ID 0xa44843b0df87377b: Processing first storage report for DS-0d55675c-0739-44b8-be36-24ff3d8ee9e5 from datanode DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3e9e5f94-798c-4260-ad92-c80e278dea43, infoPort=36503, infoSecurePort=0, ipcPort=35393, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317) 2024-11-19T08:46:21,866 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b91765a72abb224 with lease ID 0xa44843b0df87377b: from storage DS-0d55675c-0739-44b8-be36-24ff3d8ee9e5 node DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3e9e5f94-798c-4260-ad92-c80e278dea43, infoPort=36503, infoSecurePort=0, ipcPort=35393, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:21,866 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b91765a72abb224 with lease ID 0xa44843b0df87377b: Processing first storage report for DS-d30743cd-0479-42c8-8410-ef6e90159dfa from datanode DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3e9e5f94-798c-4260-ad92-c80e278dea43, infoPort=36503, infoSecurePort=0, ipcPort=35393, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317) 2024-11-19T08:46:21,866 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b91765a72abb224 with lease ID 0xa44843b0df87377b: from storage DS-d30743cd-0479-42c8-8410-ef6e90159dfa node DatanodeRegistration(127.0.0.1:33257, datanodeUuid=3e9e5f94-798c-4260-ad92-c80e278dea43, infoPort=36503, infoSecurePort=0, ipcPort=35393, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:22,044 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data4/current/BP-1390567527-172.17.0.2-1732005980317/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:22,044 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data3/current/BP-1390567527-172.17.0.2-1732005980317/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:22,065 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:22,067 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f3bbcfe19dc8354 with lease ID 0xa44843b0df87377c: Processing first storage report for DS-75062cba-a71f-4a92-a27d-5c029d79e858 from datanode DatanodeRegistration(127.0.0.1:41941, datanodeUuid=70f2f9df-545e-45de-bbb9-b83959b95b4c, infoPort=42311, infoSecurePort=0, ipcPort=38597, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317) 2024-11-19T08:46:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f3bbcfe19dc8354 with lease ID 0xa44843b0df87377c: from storage DS-75062cba-a71f-4a92-a27d-5c029d79e858 node DatanodeRegistration(127.0.0.1:41941, datanodeUuid=70f2f9df-545e-45de-bbb9-b83959b95b4c, infoPort=42311, infoSecurePort=0, ipcPort=38597, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f3bbcfe19dc8354 with lease ID 0xa44843b0df87377c: Processing first storage report for DS-8744f87e-9162-4c5e-bece-4865c79620eb from datanode DatanodeRegistration(127.0.0.1:41941, datanodeUuid=70f2f9df-545e-45de-bbb9-b83959b95b4c, infoPort=42311, infoSecurePort=0, ipcPort=38597, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317) 2024-11-19T08:46:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f3bbcfe19dc8354 with lease ID 0xa44843b0df87377c: from storage DS-8744f87e-9162-4c5e-bece-4865c79620eb node DatanodeRegistration(127.0.0.1:41941, datanodeUuid=70f2f9df-545e-45de-bbb9-b83959b95b4c, infoPort=42311, infoSecurePort=0, ipcPort=38597, storageInfo=lv=-57;cid=testClusterID;nsid=1881492712;c=1732005980317), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:22,169 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a 2024-11-19T08:46:22,173 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/zookeeper_0, clientPort=61536, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T08:46:22,174 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61536 2024-11-19T08:46:22,174 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:22,176 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:46:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:46:22,188 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef with version=8 2024-11-19T08:46:22,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase-staging 2024-11-19T08:46:22,192 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:46:22,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:22,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:22,192 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:46:22,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:22,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:46:22,192 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T08:46:22,193 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:46:22,193 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36969 2024-11-19T08:46:22,196 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36969 connecting to ZooKeeper ensemble=127.0.0.1:61536 2024-11-19T08:46:22,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:369690x0, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:46:22,243 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36969-0x101538f82a10000 connected 2024-11-19T08:46:22,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:22,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:22,319 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:22,320 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef, hbase.cluster.distributed=false 2024-11-19T08:46:22,324 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:46:22,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36969 2024-11-19T08:46:22,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36969 2024-11-19T08:46:22,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36969 2024-11-19T08:46:22,327 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36969 2024-11-19T08:46:22,327 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36969 2024-11-19T08:46:22,346 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:46:22,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:22,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:22,346 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:46:22,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:22,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:46:22,346 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:46:22,346 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:46:22,347 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42327 2024-11-19T08:46:22,349 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42327 connecting to ZooKeeper ensemble=127.0.0.1:61536 2024-11-19T08:46:22,349 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:22,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:22,371 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423270x0, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:46:22,371 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42327-0x101538f82a10001 connected 2024-11-19T08:46:22,372 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:22,372 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:46:22,375 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:46:22,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T08:46:22,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:46:22,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42327 2024-11-19T08:46:22,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42327 2024-11-19T08:46:22,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42327 2024-11-19T08:46:22,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42327 2024-11-19T08:46:22,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42327 2024-11-19T08:46:22,405 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3ab37fa97a98:36969 2024-11-19T08:46:22,405 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:22,415 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:22,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:22,415 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:22,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,423 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T08:46:22,423 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,423 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:46:22,424 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3ab37fa97a98,36969,1732005982191 from backup master directory 2024-11-19T08:46:22,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:22,431 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:22,431 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T08:46:22,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:22,431 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:22,440 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/hbase.id] with ID: d4981f0c-e144-47b9-99e2-db4e1881e80a 2024-11-19T08:46:22,440 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/.tmp/hbase.id 2024-11-19T08:46:22,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:46:22,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:46:22,447 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/.tmp/hbase.id]:[hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/hbase.id] 2024-11-19T08:46:22,464 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:22,464 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T08:46:22,466 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-19T08:46:22,473 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:46:22,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:46:22,484 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:46:22,485 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T08:46:22,486 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:46:22,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:46:22,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:46:22,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T08:46:22,494 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T08:46:22,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:46:22,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:46:22,496 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store 2024-11-19T08:46:22,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:46:22,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:46:22,506 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:22,506 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:46:22,506 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:22,506 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:22,507 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-19T08:46:22,507 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:22,507 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:22,507 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732005982506Disabling compacts and flushes for region at 1732005982506Disabling writes for close at 1732005982507 (+1 ms)Writing region close event to WAL at 1732005982507Closed at 1732005982507 2024-11-19T08:46:22,509 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/.initializing 2024-11-19T08:46:22,509 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/WALs/3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:22,512 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C36969%2C1732005982191, suffix=, logDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/WALs/3ab37fa97a98,36969,1732005982191, archiveDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/oldWALs, maxLogs=10 2024-11-19T08:46:22,513 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C36969%2C1732005982191.1732005982513 2024-11-19T08:46:22,521 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/WALs/3ab37fa97a98,36969,1732005982191/3ab37fa97a98%2C36969%2C1732005982191.1732005982513 2024-11-19T08:46:22,523 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42311:42311),(127.0.0.1/127.0.0.1:36503:36503)] 2024-11-19T08:46:22,524 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:46:22,524 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:22,524 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,524 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T08:46:22,529 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:22,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T08:46:22,531 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:46:22,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,535 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T08:46:22,535 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:46:22,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T08:46:22,537 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:46:22,538 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,539 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,540 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,541 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,541 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,542 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T08:46:22,543 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:22,545 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:46:22,546 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722045, jitterRate=-0.0818723738193512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T08:46:22,547 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732005982524Initializing all the Stores at 1732005982526 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005982526Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005982526Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005982526Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005982526Cleaning up temporary data from old regions at 1732005982541 (+15 ms)Region opened successfully at 1732005982547 (+6 ms) 2024-11-19T08:46:22,548 INFO 
[master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T08:46:22,554 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75c4745, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:46:22,555 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T08:46:22,555 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T08:46:22,555 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T08:46:22,555 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T08:46:22,556 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T08:46:22,557 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T08:46:22,557 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T08:46:22,561 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-19T08:46:22,562 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T08:46:22,587 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T08:46:22,588 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T08:46:22,589 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T08:46:22,598 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T08:46:22,598 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T08:46:22,600 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T08:46:22,606 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T08:46:22,608 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T08:46:22,614 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T08:46:22,619 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T08:46:22,629 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T08:46:22,640 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:22,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:22,640 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-19T08:46:22,640 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3ab37fa97a98,36969,1732005982191, sessionid=0x101538f82a10000, setting cluster-up flag (Was=false) 2024-11-19T08:46:22,656 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,681 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T08:46:22,683 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:22,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,698 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:22,723 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T08:46:22,725 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:22,728 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T08:46:22,730 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:22,730 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T08:46:22,731 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-19T08:46:22,731 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3ab37fa97a98,36969,1732005982191 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T08:46:22,734 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:22,734 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:22,734 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:22,734 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:22,734 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3ab37fa97a98:0, corePoolSize=10, maxPoolSize=10 2024-11-19T08:46:22,734 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,735 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:46:22,735 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,735 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732006012735 2024-11-19T08:46:22,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T08:46:22,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T08:46:22,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T08:46:22,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T08:46:22,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T08:46:22,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T08:46:22,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,737 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T08:46:22,737 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T08:46:22,737 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T08:46:22,737 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:22,737 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T08:46:22,737 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T08:46:22,737 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T08:46:22,737 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005982737,5,FailOnTimeoutGroup] 2024-11-19T08:46:22,737 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005982737,5,FailOnTimeoutGroup] 2024-11-19T08:46:22,737 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,738 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T08:46:22,738 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,738 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
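The ChoreService lines above register the master's periodic maintenance tasks (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner, SnapshotCleaner), each backed by a chain of cleaner delegates such as TimeToLiveLogCleaner and SnapshotHFileCleaner. A rough sketch of the same fixed-period scheduling pattern using plain java.util.concurrent, not HBase's ChoreService itself:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        // Mirrors "name=LogsCleaner, period=600000, unit=MILLISECONDS" from the log:
        // periodically scan the old-WALs directory and delete what the delegates approve.
        chores.scheduleAtFixedRate(
            () -> System.out.println("LogsCleaner tick"),
            0, 600_000, TimeUnit.MILLISECONDS);
      }
    }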
2024-11-19T08:46:22,738 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,739 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T08:46:22,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:46:22,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:46:22,748 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T08:46:22,749 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef 2024-11-19T08:46:22,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:46:22,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:46:22,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:22,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:46:22,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:46:22,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:22,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:46:22,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:46:22,771 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:22,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:46:22,774 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:46:22,774 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:22,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:46:22,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:46:22,776 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:22,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:22,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:46:22,778 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740 2024-11-19T08:46:22,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740 2024-11-19T08:46:22,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:46:22,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:46:22,781 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:46:22,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:46:22,785 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:46:22,785 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733344, jitterRate=-0.06750562787055969}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:46:22,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732005982765Initializing all the Stores at 1732005982766 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005982766Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005982767 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005982767Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005982767Cleaning up temporary data from old regions at 1732005982780 (+13 ms)Region opened successfully at 1732005982786 (+6 ms) 2024-11-19T08:46:22,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:46:22,787 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:46:22,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:46:22,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:46:22,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:46:22,788 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:46:22,788 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732005982787Disabling compacts and flushes for region at 1732005982787Disabling writes for close at 1732005982787Writing region close event to WAL at 1732005982787Closed at 1732005982787 2024-11-19T08:46:22,789 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:22,789 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T08:46:22,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T08:46:22,791 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:46:22,792 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(746): ClusterId : d4981f0c-e144-47b9-99e2-db4e1881e80a 2024-11-19T08:46:22,792 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:46:22,793 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T08:46:22,808 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:46:22,808 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:46:22,815 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:46:22,816 DEBUG [RS:0;3ab37fa97a98:42327 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1130490, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:46:22,828 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3ab37fa97a98:42327 2024-11-19T08:46:22,829 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:46:22,829 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:46:22,829 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T08:46:22,830 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,36969,1732005982191 with port=42327, startcode=1732005982345 2024-11-19T08:46:22,830 DEBUG [RS:0;3ab37fa97a98:42327 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:46:22,833 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44741, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:46:22,833 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36969 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:22,833 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36969 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:22,836 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef 2024-11-19T08:46:22,836 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42561 2024-11-19T08:46:22,836 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:46:22,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:46:22,848 DEBUG [RS:0;3ab37fa97a98:42327 {}] zookeeper.ZKUtil(111): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:22,849 WARN [RS:0;3ab37fa97a98:42327 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
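At this point the region server has reported for duty (reportForDuty to master=3ab37fa97a98,36969,... with port=42327) and been registered by ServerManager; its liveness is tracked through an ephemeral child znode under /hbase/rs. A small sketch that lists those children with the plain ZooKeeper client, with the quorum address and znode layout taken from this log:

    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServers {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61536", 30000, event -> { });
        // Each live region server keeps an ephemeral child named
        // "host,port,startcode", e.g. "3ab37fa97a98,42327,1732005982345".
        for (String child : zk.getChildren("/hbase/rs", false)) {
          System.out.println("live region server: " + child);
        }
        zk.close();
      }
    }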
2024-11-19T08:46:22,849 INFO [RS:0;3ab37fa97a98:42327 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:46:22,849 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/WALs/3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:22,849 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,42327,1732005982345] 2024-11-19T08:46:22,854 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:46:22,857 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:46:22,858 INFO [RS:0;3ab37fa97a98:42327 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:46:22,858 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,858 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:46:22,859 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:46:22,859 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T08:46:22,860 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,861 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,861 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:22,861 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:46:22,861 DEBUG [RS:0;3ab37fa97a98:42327 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:46:22,861 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,861 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,861 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,861 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,861 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,862 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,42327,1732005982345-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:46:22,878 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:46:22,878 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,42327,1732005982345-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,878 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:22,878 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.Replication(171): 3ab37fa97a98,42327,1732005982345 started 2024-11-19T08:46:22,895 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
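The executor.ExecutorService lines above show the region server creating one bounded thread pool per event type (RS_OPEN_REGION, RS_CLOSE_META, RS_LOG_REPLAY_OPS, and so on), so a backlog of one kind of work cannot starve the others. A generic sketch of that per-event-type pool, using a plain ThreadPoolExecutor with the core/max sizes printed in the log rather than HBase's own ExecutorService wrapper:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RsExecutorSketch {
      public static void main(String[] args) {
        // Mirrors "name=RS_OPEN_REGION-..., corePoolSize=1, maxPoolSize=1".
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open-region handler runs here"));
        openRegionPool.shutdown();
      }
    }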
2024-11-19T08:46:22,895 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,42327,1732005982345, RpcServer on 3ab37fa97a98/172.17.0.2:42327, sessionid=0x101538f82a10001 2024-11-19T08:46:22,895 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:46:22,896 DEBUG [RS:0;3ab37fa97a98:42327 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:22,896 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,42327,1732005982345' 2024-11-19T08:46:22,896 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:46:22,896 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:46:22,897 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:46:22,897 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:46:22,897 DEBUG [RS:0;3ab37fa97a98:42327 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:22,897 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,42327,1732005982345' 2024-11-19T08:46:22,897 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:46:22,898 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:46:22,898 DEBUG [RS:0;3ab37fa97a98:42327 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:46:22,898 INFO [RS:0;3ab37fa97a98:42327 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:46:22,898 INFO [RS:0;3ab37fa97a98:42327 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T08:46:22,943 WARN [3ab37fa97a98:36969 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
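The warning that closes the block above ("No servers available; cannot place 1 unassigned regions") is the AssignmentManager waiting for at least one region server to register before it can assign hbase:meta. A hedged client-side sketch of the same wait, polling live server metrics through the public Admin API, with connection settings copied from this run:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForRegionServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "61536");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          while (admin.getClusterMetrics().getLiveServerMetrics().isEmpty()) {
            System.out.println("no region servers yet, retrying...");
            TimeUnit.SECONDS.sleep(1);
          }
          System.out.println("live servers: "
              + admin.getClusterMetrics().getLiveServerMetrics().keySet());
        }
      }
    }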
2024-11-19T08:46:23,002 INFO [RS:0;3ab37fa97a98:42327 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C42327%2C1732005982345, suffix=, logDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/WALs/3ab37fa97a98,42327,1732005982345, archiveDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/oldWALs, maxLogs=32 2024-11-19T08:46:23,006 INFO [RS:0;3ab37fa97a98:42327 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C42327%2C1732005982345.1732005983006 2024-11-19T08:46:23,016 INFO [RS:0;3ab37fa97a98:42327 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/WALs/3ab37fa97a98,42327,1732005982345/3ab37fa97a98%2C42327%2C1732005982345.1732005983006 2024-11-19T08:46:23,017 DEBUG [RS:0;3ab37fa97a98:42327 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42311:42311),(127.0.0.1/127.0.0.1:36503:36503)] 2024-11-19T08:46:23,193 DEBUG [3ab37fa97a98:36969 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T08:46:23,194 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:23,197 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,42327,1732005982345, state=OPENING 2024-11-19T08:46:23,262 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T08:46:23,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:23,273 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:23,274 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:23,274 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:46:23,274 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:23,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,42327,1732005982345}] 2024-11-19T08:46:23,430 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T08:46:23,433 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52267, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T08:46:23,439 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T08:46:23,439 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:46:23,442 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C42327%2C1732005982345.meta, suffix=.meta, logDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/WALs/3ab37fa97a98,42327,1732005982345, archiveDir=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/oldWALs, maxLogs=32 2024-11-19T08:46:23,444 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C42327%2C1732005982345.meta.1732005983444.meta 2024-11-19T08:46:23,460 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/WALs/3ab37fa97a98,42327,1732005982345/3ab37fa97a98%2C42327%2C1732005982345.meta.1732005983444.meta 2024-11-19T08:46:23,467 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42311:42311),(127.0.0.1/127.0.0.1:36503:36503)] 2024-11-19T08:46:23,468 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:46:23,469 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T08:46:23,469 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T08:46:23,469 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
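The WAL lines above show the region server, and then the meta-open handler, creating FSHLog writers under .../WALs/3ab37fa97a98,42327,1732005982345 with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A small sketch that lists that WAL directory with the standard Hadoop FileSystem API; the NameNode address and path are the ones printed in this run and would differ elsewhere:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListWals {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42561"), new Configuration());
        Path walDir = new Path("/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/"
            + "WALs/3ab37fa97a98,42327,1732005982345");
        for (FileStatus status : fs.listStatus(walDir)) {
          System.out.println(status.getPath().getName() + " (" + status.getLen() + " bytes)");
        }
      }
    }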
2024-11-19T08:46:23,469 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T08:46:23,469 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:23,470 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T08:46:23,470 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T08:46:23,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:46:23,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:46:23,476 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:23,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:23,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:46:23,478 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:46:23,478 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:23,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:23,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:46:23,480 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:46:23,480 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:23,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:23,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:46:23,482 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:46:23,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:23,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
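The StoreOpener lines above instantiate one HStore per hbase:meta column family (info, ns, rep_barrier, table), each with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and the compaction settings printed in the log. For orientation, a rough equivalent of the 'info' family definition expressed with the public ColumnFamilyDescriptorBuilder API; this is illustrative only, the authoritative descriptor is the one logged by FSTableDescriptors earlier:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
      public static void main(String[] args) {
        // Mirrors the logged settings: ROW_INDEX_V1, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .setMaxVersions(3)
            .build();
        System.out.println(info);
      }
    }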
2024-11-19T08:46:23,483 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:46:23,484 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740 2024-11-19T08:46:23,486 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740 2024-11-19T08:46:23,487 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:46:23,487 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:46:23,488 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:46:23,490 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:46:23,491 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703500, jitterRate=-0.10545475780963898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:46:23,491 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T08:46:23,492 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732005983470Writing region info on filesystem at 1732005983470Initializing all the Stores at 1732005983471 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005983471Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005983474 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005983474Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005983474Cleaning up temporary data from old regions at 1732005983488 (+14 ms)Running coprocessor post-open hooks at 1732005983491 (+3 ms)Region opened successfully at 1732005983492 (+1 ms) 2024-11-19T08:46:23,493 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732005983430 2024-11-19T08:46:23,497 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T08:46:23,497 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T08:46:23,498 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:23,499 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,42327,1732005982345, state=OPEN 2024-11-19T08:46:23,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:46:23,539 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:46:23,539 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:23,539 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:23,539 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:23,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T08:46:23,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,42327,1732005982345 in 265 msec 2024-11-19T08:46:23,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T08:46:23,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 754 msec 2024-11-19T08:46:23,548 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:23,548 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T08:46:23,550 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:46:23,550 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,42327,1732005982345, seqNum=-1] 2024-11-19T08:46:23,551 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:46:23,552 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47077, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:46:23,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 830 msec 2024-11-19T08:46:23,562 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732005983562, completionTime=-1 2024-11-19T08:46:23,562 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T08:46:23,562 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T08:46:23,564 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732006043564 2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732006103565 2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36969,1732005982191-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36969,1732005982191-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36969,1732005982191-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3ab37fa97a98:36969, period=300000, unit=MILLISECONDS is enabled. 
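With pid=3 and pid=2 finished, hbase:meta is OPEN on 3ab37fa97a98,42327,1732005982345 and its location is published in ZooKeeper (/hbase/meta-region-server), which is exactly what the later "The fetched meta region location is ..." entries read back. A hedged sketch of the same lookup done from a client through the public RegionLocator API, with connection settings taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class LocateMeta {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "61536");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }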
2024-11-19T08:46:23,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:23,566 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:23,568 DEBUG [master/3ab37fa97a98:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T08:46:23,572 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.140sec 2024-11-19T08:46:23,572 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T08:46:23,573 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T08:46:23,573 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T08:46:23,573 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T08:46:23,573 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T08:46:23,573 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36969,1732005982191-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:46:23,573 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36969,1732005982191-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T08:46:23,576 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T08:46:23,576 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T08:46:23,576 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36969,1732005982191-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:23,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b53d9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:46:23,593 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3ab37fa97a98,36969,-1 for getting cluster id 2024-11-19T08:46:23,593 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T08:46:23,596 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd4981f0c-e144-47b9-99e2-db4e1881e80a' 2024-11-19T08:46:23,596 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T08:46:23,597 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d4981f0c-e144-47b9-99e2-db4e1881e80a" 2024-11-19T08:46:23,597 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36139299, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:46:23,597 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3ab37fa97a98,36969,-1] 2024-11-19T08:46:23,597 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T08:46:23,598 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:23,600 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T08:46:23,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39bdb4ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:46:23,602 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:46:23,604 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,42327,1732005982345, seqNum=-1] 2024-11-19T08:46:23,604 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:46:23,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35838, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:46:23,609 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:23,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:23,614 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T08:46:23,615 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T08:46:23,615 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T08:46:23,615 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:46:23,615 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:23,616 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:23,616 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T08:46:23,616 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T08:46:23,616 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1676223287, stopped=false 2024-11-19T08:46:23,616 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3ab37fa97a98,36969,1732005982191 2024-11-19T08:46:23,631 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:23,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:23,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:23,631 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:23,631 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:46:23,631 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T08:46:23,632 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:46:23,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:23,632 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,42327,1732005982345' ***** 2024-11-19T08:46:23,632 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:46:23,632 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:46:23,632 INFO [RS:0;3ab37fa97a98:42327 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T08:46:23,632 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:46:23,632 INFO [RS:0;3ab37fa97a98:42327 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:46:23,633 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:23,633 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:46:23,633 INFO [RS:0;3ab37fa97a98:42327 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3ab37fa97a98:42327. 2024-11-19T08:46:23,633 DEBUG [RS:0;3ab37fa97a98:42327 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:46:23,633 DEBUG [RS:0;3ab37fa97a98:42327 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:23,633 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-19T08:46:23,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:23,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:23,634 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:46:23,634 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T08:46:23,635 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T08:46:23,635 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T08:46:23,635 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T08:46:23,635 DEBUG [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T08:46:23,635 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:46:23,635 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:46:23,635 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:46:23,635 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:46:23,635 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:46:23,636 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T08:46:23,661 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740/.tmp/ns/8d206525dac24e48a595af61b820d932 is 43, key is default/ns:d/1732005983553/Put/seqid=0 2024-11-19T08:46:23,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741835_1011 (size=5153) 2024-11-19T08:46:23,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741835_1011 (size=5153) 2024-11-19T08:46:23,670 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740/.tmp/ns/8d206525dac24e48a595af61b820d932 2024-11-19T08:46:23,681 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740/.tmp/ns/8d206525dac24e48a595af61b820d932 as hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740/ns/8d206525dac24e48a595af61b820d932 2024-11-19T08:46:23,690 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740/ns/8d206525dac24e48a595af61b820d932, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T08:46:23,691 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 56ms, sequenceid=6, compaction requested=false 2024-11-19T08:46:23,691 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T08:46:23,698 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T08:46:23,699 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:46:23,699 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:46:23,700 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732005983635Running coprocessor pre-close hooks at 1732005983635Disabling compacts and flushes for region at 1732005983635Disabling writes for close at 1732005983635Obtaining lock to block concurrent updates at 1732005983636 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732005983636Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732005983636Flushing stores of hbase:meta,,1.1588230740 at 1732005983637 (+1 ms)Flushing 1588230740/ns: creating writer at 1732005983637Flushing 1588230740/ns: appending metadata at 1732005983660 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1732005983660Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cc61758: reopening flushed file at 1732005983680 (+20 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 56ms, sequenceid=6, compaction requested=false at 1732005983691 (+11 ms)Writing region close event to WAL at 1732005983693 (+2 ms)Running coprocessor post-close hooks at 1732005983699 (+6 ms)Closed at 1732005983699 2024-11-19T08:46:23,700 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T08:46:23,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:23,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:23,835 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,42327,1732005982345; all regions closed. 2024-11-19T08:46:23,836 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,836 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,836 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,836 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,836 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741834_1010 (size=1152) 2024-11-19T08:46:23,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741834_1010 (size=1152) 2024-11-19T08:46:23,842 DEBUG [RS:0;3ab37fa97a98:42327 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/oldWALs 2024-11-19T08:46:23,842 INFO [RS:0;3ab37fa97a98:42327 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C42327%2C1732005982345.meta:.meta(num 1732005983444) 2024-11-19T08:46:23,842 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,842 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,842 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,842 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,843 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:23,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741833_1009 (size=93) 2024-11-19T08:46:23,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741833_1009 (size=93) 2024-11-19T08:46:23,848 DEBUG [RS:0;3ab37fa97a98:42327 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/oldWALs 2024-11-19T08:46:23,848 INFO [RS:0;3ab37fa97a98:42327 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C42327%2C1732005982345:(num 1732005983006) 2024-11-19T08:46:23,848 DEBUG [RS:0;3ab37fa97a98:42327 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:23,848 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:46:23,849 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:46:23,849 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T08:46:23,849 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:46:23,849 INFO [regionserver/3ab37fa97a98:0.logRoller {}] 
wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:46:23,849 INFO [RS:0;3ab37fa97a98:42327 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42327 2024-11-19T08:46:23,881 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,42327,1732005982345 2024-11-19T08:46:23,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:46:23,881 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:46:23,889 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,42327,1732005982345] 2024-11-19T08:46:23,897 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,42327,1732005982345 already deleted, retry=false 2024-11-19T08:46:23,898 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,42327,1732005982345 expired; onlineServers=0 2024-11-19T08:46:23,898 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3ab37fa97a98,36969,1732005982191' ***** 2024-11-19T08:46:23,898 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T08:46:23,898 INFO [M:0;3ab37fa97a98:36969 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:46:23,898 INFO [M:0;3ab37fa97a98:36969 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:46:23,898 DEBUG [M:0;3ab37fa97a98:36969 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T08:46:23,898 DEBUG [M:0;3ab37fa97a98:36969 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T08:46:23,898 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T08:46:23,898 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005982737 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005982737,5,FailOnTimeoutGroup] 2024-11-19T08:46:23,898 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005982737 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005982737,5,FailOnTimeoutGroup] 2024-11-19T08:46:23,898 INFO [M:0;3ab37fa97a98:36969 {}] hbase.ChoreService(370): Chore service for: master/3ab37fa97a98:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T08:46:23,899 INFO [M:0;3ab37fa97a98:36969 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:46:23,899 DEBUG [M:0;3ab37fa97a98:36969 {}] master.HMaster(1795): Stopping service threads 2024-11-19T08:46:23,899 INFO [M:0;3ab37fa97a98:36969 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T08:46:23,899 INFO [M:0;3ab37fa97a98:36969 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:46:23,899 INFO [M:0;3ab37fa97a98:36969 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T08:46:23,899 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T08:46:23,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T08:46:23,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:23,906 DEBUG [M:0;3ab37fa97a98:36969 {}] zookeeper.ZKUtil(347): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T08:46:23,906 WARN [M:0;3ab37fa97a98:36969 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T08:46:23,907 INFO [M:0;3ab37fa97a98:36969 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/.lastflushedseqids 2024-11-19T08:46:23,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741836_1012 (size=99) 2024-11-19T08:46:23,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741836_1012 (size=99) 2024-11-19T08:46:23,921 INFO [M:0;3ab37fa97a98:36969 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T08:46:23,921 INFO [M:0;3ab37fa97a98:36969 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T08:46:23,921 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:46:23,921 INFO [M:0;3ab37fa97a98:36969 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:23,921 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:23,921 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:46:23,921 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:23,921 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T08:46:23,944 DEBUG [M:0;3ab37fa97a98:36969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0fa95bb427c344d9a2cb0d214be11b94 is 82, key is hbase:meta,,1/info:regioninfo/1732005983498/Put/seqid=0 2024-11-19T08:46:23,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741837_1013 (size=5672) 2024-11-19T08:46:23,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741837_1013 (size=5672) 2024-11-19T08:46:23,954 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0fa95bb427c344d9a2cb0d214be11b94 2024-11-19T08:46:23,979 DEBUG [M:0;3ab37fa97a98:36969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e0626cb772654db4940797965dacac23 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732005983560/Put/seqid=0 2024-11-19T08:46:23,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741838_1014 (size=5275) 2024-11-19T08:46:23,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741838_1014 (size=5275) 2024-11-19T08:46:23,986 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e0626cb772654db4940797965dacac23 2024-11-19T08:46:23,989 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:46:23,989 INFO [RS:0;3ab37fa97a98:42327 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:46:23,989 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42327-0x101538f82a10001, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-19T08:46:23,989 INFO [RS:0;3ab37fa97a98:42327 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,42327,1732005982345; zookeeper connection closed. 2024-11-19T08:46:23,990 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46050e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46050e 2024-11-19T08:46:23,990 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T08:46:24,010 DEBUG [M:0;3ab37fa97a98:36969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/859faffc6b18433b8b89ae2c69acab7d is 69, key is 3ab37fa97a98,42327,1732005982345/rs:state/1732005982834/Put/seqid=0 2024-11-19T08:46:24,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741839_1015 (size=5156) 2024-11-19T08:46:24,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741839_1015 (size=5156) 2024-11-19T08:46:24,022 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/859faffc6b18433b8b89ae2c69acab7d 2024-11-19T08:46:24,047 DEBUG [M:0;3ab37fa97a98:36969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeeb4b7e467f4dea9de2dc2b056a8f08 is 52, key is load_balancer_on/state:d/1732005983613/Put/seqid=0 2024-11-19T08:46:24,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741840_1016 (size=5056) 2024-11-19T08:46:24,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741840_1016 (size=5056) 2024-11-19T08:46:24,336 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:46:24,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:24,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:24,454 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeeb4b7e467f4dea9de2dc2b056a8f08 2024-11-19T08:46:24,463 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0fa95bb427c344d9a2cb0d214be11b94 as hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0fa95bb427c344d9a2cb0d214be11b94 2024-11-19T08:46:24,472 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0fa95bb427c344d9a2cb0d214be11b94, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T08:46:24,474 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e0626cb772654db4940797965dacac23 as hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e0626cb772654db4940797965dacac23 2024-11-19T08:46:24,482 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e0626cb772654db4940797965dacac23, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T08:46:24,483 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/859faffc6b18433b8b89ae2c69acab7d as hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/859faffc6b18433b8b89ae2c69acab7d 2024-11-19T08:46:24,490 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/859faffc6b18433b8b89ae2c69acab7d, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T08:46:24,492 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeeb4b7e467f4dea9de2dc2b056a8f08 as hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aeeb4b7e467f4dea9de2dc2b056a8f08 2024-11-19T08:46:24,501 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42561/user/jenkins/test-data/4bea2e69-5598-52c0-85fe-037044c8afef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aeeb4b7e467f4dea9de2dc2b056a8f08, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T08:46:24,502 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 581ms, sequenceid=29, compaction requested=false 2024-11-19T08:46:24,510 INFO [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:24,510 DEBUG [M:0;3ab37fa97a98:36969 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732005983921Disabling compacts and flushes for region at 1732005983921Disabling writes for close at 1732005983921Obtaining lock to block concurrent updates at 1732005983921Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732005983921Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732005983922 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732005983923 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732005983923Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732005983944 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732005983944Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732005983960 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732005983978 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732005983978Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732005983993 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732005984010 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732005984010Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732005984030 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732005984047 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732005984047Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f33c51e: reopening flushed file at 1732005984462 (+415 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fefbec6: reopening flushed file at 1732005984472 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ad9852f: reopening flushed file at 1732005984482 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42d2195d: reopening flushed file at 1732005984491 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 581ms, sequenceid=29, compaction requested=false at 1732005984502 (+11 ms)Writing region close event to WAL at 1732005984509 (+7 ms)Closed at 1732005984510 (+1 ms) 2024-11-19T08:46:24,511 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:24,511 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:24,511 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:24,511 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-19T08:46:24,511 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:24,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33257 is added to blk_1073741830_1006 (size=10311) 2024-11-19T08:46:24,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41941 is added to blk_1073741830_1006 (size=10311) 2024-11-19T08:46:24,515 INFO [M:0;3ab37fa97a98:36969 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T08:46:24,515 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:46:24,515 INFO [M:0;3ab37fa97a98:36969 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36969 2024-11-19T08:46:24,515 INFO [M:0;3ab37fa97a98:36969 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:46:24,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:46:24,664 INFO [M:0;3ab37fa97a98:36969 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:46:24,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36969-0x101538f82a10000, quorum=127.0.0.1:61536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:46:24,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32b5a1b3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:24,667 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76b25f02{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:24,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:24,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b589a7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:24,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fcb1c4b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:24,670 WARN [BP-1390567527-172.17.0.2-1732005980317 heartbeating to localhost/127.0.0.1:42561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:24,670 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:46:24,670 WARN [BP-1390567527-172.17.0.2-1732005980317 heartbeating to localhost/127.0.0.1:42561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1390567527-172.17.0.2-1732005980317 (Datanode Uuid 70f2f9df-545e-45de-bbb9-b83959b95b4c) service to localhost/127.0.0.1:42561 2024-11-19T08:46:24,670 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:24,671 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data3/current/BP-1390567527-172.17.0.2-1732005980317 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:24,671 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data4/current/BP-1390567527-172.17.0.2-1732005980317 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:24,672 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:24,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2377404e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:24,674 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d659f5b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:24,674 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:24,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3590efb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:24,675 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33e82987{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:24,676 WARN [BP-1390567527-172.17.0.2-1732005980317 heartbeating to localhost/127.0.0.1:42561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:24,676 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:46:24,676 WARN [BP-1390567527-172.17.0.2-1732005980317 heartbeating to localhost/127.0.0.1:42561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1390567527-172.17.0.2-1732005980317 (Datanode Uuid 3e9e5f94-798c-4260-ad92-c80e278dea43) service to localhost/127.0.0.1:42561 2024-11-19T08:46:24,676 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:24,677 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data1/current/BP-1390567527-172.17.0.2-1732005980317 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:24,677 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/cluster_d153661b-3be7-294b-d233-f9ea5100725b/data/data2/current/BP-1390567527-172.17.0.2-1732005980317 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:24,677 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:24,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6db7bfac{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:46:24,683 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f2bc681{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:24,683 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:24,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@124e4130{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:24,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ea6e47a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:24,689 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T08:46:24,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T08:46:24,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T08:46:24,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.log.dir so I do NOT create it in target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c 2024-11-19T08:46:24,708 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b324852b-78c6-c383-386f-50842ddd008a/hadoop.tmp.dir so I do NOT create it in target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c 2024-11-19T08:46:24,708 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88, deleteOnExit=true 2024-11-19T08:46:24,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T08:46:24,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/test.cache.data in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T08:46:24,709 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:46:24,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/nfs.dump.dir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T08:46:24,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T08:46:24,724 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:46:24,862 INFO [regionserver/3ab37fa97a98:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:46:24,996 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:25,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:25,007 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:25,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:25,008 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:46:25,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:25,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@342817d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:25,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78100011{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:25,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e477d02{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir/jetty-localhost-43589-hadoop-hdfs-3_4_1-tests_jar-_-any-1148644443528117828/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:46:25,119 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@789b23ca{HTTP/1.1, (http/1.1)}{localhost:43589} 2024-11-19T08:46:25,119 INFO [Time-limited test {}] server.Server(415): Started @109647ms 2024-11-19T08:46:25,132 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:46:25,340 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:25,346 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:25,347 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:25,347 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:25,347 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:46:25,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7359eab1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:25,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@225c4391{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:25,459 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10f6ef1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir/jetty-localhost-44901-hadoop-hdfs-3_4_1-tests_jar-_-any-17522607402194036744/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:25,459 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c5ffb55{HTTP/1.1, (http/1.1)}{localhost:44901} 2024-11-19T08:46:25,459 INFO [Time-limited test {}] server.Server(415): Started @109987ms 2024-11-19T08:46:25,461 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:46:25,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:25,499 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:25,500 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:25,500 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:25,500 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:46:25,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70de2eeb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:25,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@779704d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:25,612 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ab51772{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir/jetty-localhost-44085-hadoop-hdfs-3_4_1-tests_jar-_-any-7086156500984316666/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:25,612 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@596c0e8d{HTTP/1.1, (http/1.1)}{localhost:44085} 2024-11-19T08:46:25,612 INFO [Time-limited test {}] server.Server(415): Started @110140ms 2024-11-19T08:46:25,614 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:46:26,249 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data1/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:26,249 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data2/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:26,272 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:26,274 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b20080cffa35844 with lease ID 0x8808a5ba8148fe1e: Processing first storage report for DS-e4ec5e9e-650a-43c4-978d-2d8429898426 from datanode DatanodeRegistration(127.0.0.1:46153, datanodeUuid=ddb8880e-f7de-4466-b1b2-c9a2b0e56f7a, infoPort=38759, infoSecurePort=0, ipcPort=35143, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:26,274 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b20080cffa35844 with lease ID 0x8808a5ba8148fe1e: from storage DS-e4ec5e9e-650a-43c4-978d-2d8429898426 node DatanodeRegistration(127.0.0.1:46153, datanodeUuid=ddb8880e-f7de-4466-b1b2-c9a2b0e56f7a, infoPort=38759, infoSecurePort=0, ipcPort=35143, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:46:26,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b20080cffa35844 with lease ID 0x8808a5ba8148fe1e: Processing first storage report for DS-69bc7625-3099-45ee-b4b9-772d8fadd94b from datanode DatanodeRegistration(127.0.0.1:46153, datanodeUuid=ddb8880e-f7de-4466-b1b2-c9a2b0e56f7a, infoPort=38759, infoSecurePort=0, ipcPort=35143, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:26,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b20080cffa35844 with lease ID 0x8808a5ba8148fe1e: from storage DS-69bc7625-3099-45ee-b4b9-772d8fadd94b node DatanodeRegistration(127.0.0.1:46153, datanodeUuid=ddb8880e-f7de-4466-b1b2-c9a2b0e56f7a, infoPort=38759, infoSecurePort=0, ipcPort=35143, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:26,385 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data3/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:26,385 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data4/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:26,402 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:26,405 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d98d7aecb78f392 with lease ID 0x8808a5ba8148fe1f: Processing first storage report for DS-af2ac2d2-d5bd-42f8-adc7-234974203b47 from datanode DatanodeRegistration(127.0.0.1:40261, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=46125, infoSecurePort=0, ipcPort=34749, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:26,405 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d98d7aecb78f392 with lease ID 0x8808a5ba8148fe1f: from storage DS-af2ac2d2-d5bd-42f8-adc7-234974203b47 node DatanodeRegistration(127.0.0.1:40261, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=46125, infoSecurePort=0, ipcPort=34749, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:26,405 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d98d7aecb78f392 with lease ID 0x8808a5ba8148fe1f: Processing first storage report for DS-4639319f-0d0f-4458-a7cc-084a95c0d979 from datanode DatanodeRegistration(127.0.0.1:40261, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=46125, infoSecurePort=0, ipcPort=34749, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:26,405 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d98d7aecb78f392 with lease ID 0x8808a5ba8148fe1f: from storage DS-4639319f-0d0f-4458-a7cc-084a95c0d979 node DatanodeRegistration(127.0.0.1:40261, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=46125, infoSecurePort=0, ipcPort=34749, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:46:26,462 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c 2024-11-19T08:46:26,470 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/zookeeper_0, clientPort=52018, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T08:46:26,471 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52018 2024-11-19T08:46:26,471 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:26,473 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:26,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:46:26,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:46:26,484 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882 with version=8 2024-11-19T08:46:26,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase-staging 2024-11-19T08:46:26,486 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:46:26,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:26,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:26,487 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:46:26,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:26,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:46:26,487 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T08:46:26,487 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:46:26,488 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41505 2024-11-19T08:46:26,490 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41505 connecting to ZooKeeper ensemble=127.0.0.1:52018 2024-11-19T08:46:26,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415050x0, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:46:26,548 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41505-0x101538f93680000 connected 2024-11-19T08:46:26,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:26,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:26,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:26,620 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882, hbase.cluster.distributed=false 2024-11-19T08:46:26,622 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:46:26,622 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41505 2024-11-19T08:46:26,622 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41505 2024-11-19T08:46:26,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41505 2024-11-19T08:46:26,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41505 2024-11-19T08:46:26,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41505 2024-11-19T08:46:26,641 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:46:26,642 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:26,642 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:26,642 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:46:26,642 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:26,642 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:46:26,642 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:46:26,642 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:46:26,643 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46031 2024-11-19T08:46:26,644 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46031 connecting to ZooKeeper ensemble=127.0.0.1:52018 2024-11-19T08:46:26,645 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:26,647 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:26,655 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460310x0, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:46:26,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:460310x0, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:46:26,656 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46031-0x101538f93680001 connected 2024-11-19T08:46:26,656 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:46:26,658 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:46:26,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T08:46:26,660 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:46:26,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46031 2024-11-19T08:46:26,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46031 2024-11-19T08:46:26,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46031 2024-11-19T08:46:26,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46031 2024-11-19T08:46:26,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46031 2024-11-19T08:46:26,675 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3ab37fa97a98:41505 2024-11-19T08:46:26,676 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:26,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:26,687 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:26,688 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:26,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,697 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T08:46:26,697 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,698 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:46:26,698 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3ab37fa97a98,41505,1732005986486 from backup master directory 2024-11-19T08:46:26,705 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:26,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:26,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:46:26,705 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T08:46:26,705 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:26,713 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/hbase.id] with ID: 49adb997-d368-42cd-b66e-e2a3d3374526 2024-11-19T08:46:26,713 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/.tmp/hbase.id 2024-11-19T08:46:26,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:46:26,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:46:26,720 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/.tmp/hbase.id]:[hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/hbase.id] 2024-11-19T08:46:26,735 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:26,736 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T08:46:26,738 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
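The FSUtils entries just above show the master writing its cluster ID to a temporary file (.tmp/hbase.id) and then moving it to its final name, so readers never observe a partially written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API, assuming an hdfs:// target path; the path handling and class name are illustrative, not the actual FSUtils code:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path target = new Path(args[0]);                          // e.g. hdfs://.../hbase.id
    Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
    FileSystem fs = target.getFileSystem(conf);

    // 1. Write the ID to a temporary location first (cluster ID taken from the log above).
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("49adb997-d368-42cd-b66e-e2a3d3374526".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move the temporary file to its target location; rename is atomic per directory on HDFS.
    if (!fs.rename(tmp, target)) {
      throw new IOException("rename " + tmp + " -> " + target + " failed");
    }
  }
}
```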
2024-11-19T08:46:26,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,747 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:46:26,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:46:26,755 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:46:26,756 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T08:46:26,757 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:46:26,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:46:26,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:46:26,766 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store 2024-11-19T08:46:26,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:46:26,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:46:26,775 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:26,776 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:46:26,776 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:26,776 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:26,776 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:46:26,776 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:46:26,776 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
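The MasterRegion entries above spell out the column families of the local 'master:store' region (info, proc, rs, state) with their versions, block sizes, encodings and bloom filters. That region is created internally; the sketch below only shows how an equivalent descriptor for two of those families could be expressed with the public HBase 2.x client API, mirroring the attributes logged above:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info': 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        // 'proc': single version, default 64 KB blocks, ROW bloom filter
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}
```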
2024-11-19T08:46:26,776 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732005986776Disabling compacts and flushes for region at 1732005986776Disabling writes for close at 1732005986776Writing region close event to WAL at 1732005986776Closed at 1732005986776 2024-11-19T08:46:26,777 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/.initializing 2024-11-19T08:46:26,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:26,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C41505%2C1732005986486, suffix=, logDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486, archiveDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/oldWALs, maxLogs=10 2024-11-19T08:46:26,782 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C41505%2C1732005986486.1732005986781 2024-11-19T08:46:26,787 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 2024-11-19T08:46:26,788 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46125:46125),(127.0.0.1/127.0.0.1:38759:38759)] 2024-11-19T08:46:26,789 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:46:26,789 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:26,789 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,789 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T08:46:26,793 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:26,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:26,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T08:46:26,795 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:26,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:46:26,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T08:46:26,798 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:26,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:46:26,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T08:46:26,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:26,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:46:26,801 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,801 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,802 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,803 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,803 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,804 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T08:46:26,805 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:46:26,808 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:46:26,809 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705516, jitterRate=-0.10289084911346436}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T08:46:26,809 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732005986789Initializing all the Stores at 1732005986790 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005986790Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005986791 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005986791Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005986791Cleaning up temporary data from old regions at 1732005986803 (+12 ms)Region opened successfully at 1732005986809 (+6 ms) 2024-11-19T08:46:26,810 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T08:46:26,814 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62a5cb96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:46:26,815 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T08:46:26,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T08:46:26,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T08:46:26,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T08:46:26,817 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T08:46:26,817 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T08:46:26,817 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T08:46:26,820 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T08:46:26,821 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T08:46:26,837 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T08:46:26,837 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T08:46:26,838 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T08:46:26,847 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T08:46:26,848 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T08:46:26,849 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T08:46:26,855 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T08:46:26,857 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T08:46:26,864 DEBUG 
[master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T08:46:26,867 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T08:46:26,878 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T08:46:26,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:26,889 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:46:26,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,889 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,889 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3ab37fa97a98,41505,1732005986486, sessionid=0x101538f93680000, setting cluster-up flag (Was=false) 2024-11-19T08:46:26,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,930 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T08:46:26,932 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:26,947 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:26,972 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T08:46:26,973 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:26,975 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T08:46:26,977 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:26,977 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T08:46:26,977 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T08:46:26,977 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3ab37fa97a98,41505,1732005986486 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T08:46:26,979 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:26,979 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:26,980 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:26,980 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:46:26,980 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3ab37fa97a98:0, corePoolSize=10, maxPoolSize=10 2024-11-19T08:46:26,980 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:26,980 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:46:26,980 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:26,983 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:26,983 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T08:46:26,985 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:26,985 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T08:46:26,988 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732006016988 2024-11-19T08:46:26,988 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T08:46:26,988 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T08:46:26,989 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T08:46:26,989 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T08:46:26,989 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T08:46:26,989 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T08:46:26,989 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:26,989 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T08:46:26,990 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T08:46:26,990 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T08:46:26,990 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T08:46:26,990 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T08:46:26,990 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005986990,5,FailOnTimeoutGroup] 2024-11-19T08:46:26,990 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005986990,5,FailOnTimeoutGroup] 2024-11-19T08:46:26,990 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:26,990 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T08:46:26,991 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:26,991 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:26,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:46:26,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:46:27,064 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(746): ClusterId : 49adb997-d368-42cd-b66e-e2a3d3374526 2024-11-19T08:46:27,064 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:46:27,098 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:46:27,098 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:46:27,106 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:46:27,107 DEBUG [RS:0;3ab37fa97a98:46031 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d5ba6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:46:27,119 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3ab37fa97a98:46031 2024-11-19T08:46:27,119 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:46:27,119 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:46:27,119 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T08:46:27,120 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,41505,1732005986486 with port=46031, startcode=1732005986641 2024-11-19T08:46:27,120 DEBUG [RS:0;3ab37fa97a98:46031 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:46:27,123 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56521, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:46:27,123 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41505 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,123 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41505 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,126 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882 2024-11-19T08:46:27,126 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36955 2024-11-19T08:46:27,126 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:46:27,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:46:27,137 DEBUG [RS:0;3ab37fa97a98:46031 {}] zookeeper.ZKUtil(111): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,137 WARN [RS:0;3ab37fa97a98:46031 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T08:46:27,137 INFO [RS:0;3ab37fa97a98:46031 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:46:27,138 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,138 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,46031,1732005986641] 2024-11-19T08:46:27,142 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:46:27,144 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:46:27,146 INFO [RS:0;3ab37fa97a98:46031 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:46:27,147 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:27,147 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:46:27,148 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:46:27,148 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,148 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,148 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,148 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,148 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:46:27,149 DEBUG [RS:0;3ab37fa97a98:46031 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:46:27,150 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:27,150 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,150 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,151 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,151 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,151 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,46031,1732005986641-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:46:27,171 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:46:27,172 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,46031,1732005986641-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,172 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,172 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.Replication(171): 3ab37fa97a98,46031,1732005986641 started 2024-11-19T08:46:27,188 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,188 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,46031,1732005986641, RpcServer on 3ab37fa97a98/172.17.0.2:46031, sessionid=0x101538f93680001 2024-11-19T08:46:27,188 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:46:27,188 DEBUG [RS:0;3ab37fa97a98:46031 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,189 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,46031,1732005986641' 2024-11-19T08:46:27,189 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:46:27,189 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:46:27,190 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:46:27,190 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:46:27,190 DEBUG [RS:0;3ab37fa97a98:46031 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,190 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,46031,1732005986641' 2024-11-19T08:46:27,190 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:46:27,190 DEBUG 
[RS:0;3ab37fa97a98:46031 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:46:27,190 DEBUG [RS:0;3ab37fa97a98:46031 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:46:27,191 INFO [RS:0;3ab37fa97a98:46031 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:46:27,191 INFO [RS:0;3ab37fa97a98:46031 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T08:46:27,294 INFO [RS:0;3ab37fa97a98:46031 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C46031%2C1732005986641, suffix=, logDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641, archiveDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs, maxLogs=32 2024-11-19T08:46:27,296 INFO [RS:0;3ab37fa97a98:46031 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.1732005987295 2024-11-19T08:46:27,303 INFO [RS:0;3ab37fa97a98:46031 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 2024-11-19T08:46:27,304 DEBUG [RS:0;3ab37fa97a98:46031 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38759:38759),(127.0.0.1/127.0.0.1:46125:46125)] 2024-11-19T08:46:27,396 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T08:46:27,396 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882 2024-11-19T08:46:27,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741833_1009 (size=32) 2024-11-19T08:46:27,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741833_1009 (size=32) 2024-11-19T08:46:27,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:27,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:46:27,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:46:27,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:27,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:46:27,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:46:27,436 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,436 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:27,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:46:27,438 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:46:27,438 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:27,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:46:27,440 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:46:27,440 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:27,441 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:46:27,441 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740 2024-11-19T08:46:27,442 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740 2024-11-19T08:46:27,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:46:27,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:46:27,444 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:46:27,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:46:27,447 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:46:27,448 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723746, jitterRate=-0.07971030473709106}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:46:27,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732005987408Initializing all the Stores at 1732005987409 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005987409Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005987409Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005987409Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005987409Cleaning up temporary data from old regions at 1732005987443 (+34 ms)Region opened successfully at 1732005987448 (+5 ms) 2024-11-19T08:46:27,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:46:27,448 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-19T08:46:27,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:46:27,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:46:27,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:46:27,449 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:46:27,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732005987448Disabling compacts and flushes for region at 1732005987448Disabling writes for close at 1732005987449 (+1 ms)Writing region close event to WAL at 1732005987449Closed at 1732005987449 2024-11-19T08:46:27,451 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:27,451 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T08:46:27,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T08:46:27,452 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:46:27,454 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T08:46:27,604 DEBUG [3ab37fa97a98:41505 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T08:46:27,605 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,607 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,46031,1732005986641, state=OPENING 2024-11-19T08:46:27,662 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T08:46:27,672 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:27,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:46:27,673 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:46:27,673 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,46031,1732005986641}] 2024-11-19T08:46:27,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:27,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:27,828 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T08:46:27,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35905, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T08:46:27,837 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T08:46:27,837 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:46:27,840 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C46031%2C1732005986641.meta, suffix=.meta, logDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641, archiveDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs, maxLogs=32 2024-11-19T08:46:27,841 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta 2024-11-19T08:46:27,846 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta 2024-11-19T08:46:27,847 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38759:38759),(127.0.0.1/127.0.0.1:46125:46125)] 2024-11-19T08:46:27,848 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:46:27,848 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T08:46:27,848 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T08:46:27,848 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T08:46:27,848 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T08:46:27,849 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:27,849 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T08:46:27,849 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T08:46:27,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:46:27,852 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:46:27,852 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:27,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:46:27,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:46:27,853 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:27,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:46:27,854 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:46:27,854 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:46:27,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:46:27,856 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:46:27,856 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:27,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T08:46:27,856 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:46:27,857 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740 2024-11-19T08:46:27,859 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740 2024-11-19T08:46:27,860 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:46:27,861 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:46:27,861 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:46:27,863 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:46:27,864 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824772, jitterRate=0.04875299334526062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:46:27,864 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T08:46:27,864 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732005987849Writing region info on filesystem at 1732005987849Initializing all the Stores at 1732005987850 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005987850Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005987850Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005987850Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732005987850Cleaning up temporary data from old regions at 1732005987861 (+11 ms)Running coprocessor post-open hooks at 1732005987864 (+3 ms)Region opened successfully at 1732005987864 2024-11-19T08:46:27,865 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732005987828 2024-11-19T08:46:27,868 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T08:46:27,868 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T08:46:27,869 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,870 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,46031,1732005986641, state=OPEN 2024-11-19T08:46:27,935 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:46:27,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:46:27,935 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:27,935 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:27,935 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:46:27,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T08:46:27,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,46031,1732005986641 in 262 msec 2024-11-19T08:46:27,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T08:46:27,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 489 msec 2024-11-19T08:46:27,944 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:46:27,944 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T08:46:27,945 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:46:27,946 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,46031,1732005986641, seqNum=-1] 2024-11-19T08:46:27,946 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:46:27,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54725, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:46:27,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 977 msec 2024-11-19T08:46:27,954 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732005987954, completionTime=-1 2024-11-19T08:46:27,954 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T08:46:27,954 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T08:46:27,957 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T08:46:27,957 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732006047957 2024-11-19T08:46:27,957 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732006107957 2024-11-19T08:46:27,957 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T08:46:27,957 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,41505,1732005986486-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,957 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,41505,1732005986486-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,957 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,41505,1732005986486-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,958 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3ab37fa97a98:41505, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:27,958 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,958 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:27,960 DEBUG [master/3ab37fa97a98:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T08:46:27,962 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.256sec 2024-11-19T08:46:27,962 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T08:46:27,962 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T08:46:27,962 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T08:46:27,962 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T08:46:27,963 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T08:46:27,963 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,41505,1732005986486-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:46:27,963 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,41505,1732005986486-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T08:46:27,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18dbfa70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:46:27,964 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3ab37fa97a98,41505,-1 for getting cluster id 2024-11-19T08:46:27,964 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T08:46:27,965 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T08:46:27,965 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T08:46:27,966 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,41505,1732005986486-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:27,966 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '49adb997-d368-42cd-b66e-e2a3d3374526' 2024-11-19T08:46:27,966 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T08:46:27,966 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "49adb997-d368-42cd-b66e-e2a3d3374526" 2024-11-19T08:46:27,967 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a72ea29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:46:27,967 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3ab37fa97a98,41505,-1] 2024-11-19T08:46:27,967 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T08:46:27,967 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:46:27,969 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T08:46:27,970 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15fefef2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:46:27,970 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:46:27,971 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,46031,1732005986641, seqNum=-1] 2024-11-19T08:46:27,972 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:46:27,973 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:46:27,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:27,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:27,978 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T08:46:27,998 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:46:27,998 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:27,998 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:27,999 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:46:27,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:46:27,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:46:27,999 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:46:27,999 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:46:28,000 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36037 2024-11-19T08:46:28,002 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36037 connecting to ZooKeeper ensemble=127.0.0.1:52018 2024-11-19T08:46:28,003 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:28,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:46:28,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360370x0, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:46:28,029 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-19T08:46:28,029 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:360370x0, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-19T08:46:28,029 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36037-0x101538f93680002 connected 2024-11-19T08:46:28,030 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:46:28,031 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:46:28,031 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:46:28,033 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:46:28,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36037 2024-11-19T08:46:28,033 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36037 2024-11-19T08:46:28,034 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36037 2024-11-19T08:46:28,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36037 2024-11-19T08:46:28,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36037 2024-11-19T08:46:28,041 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(746): ClusterId : 49adb997-d368-42cd-b66e-e2a3d3374526 2024-11-19T08:46:28,041 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:46:28,047 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:46:28,047 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:46:28,056 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:46:28,056 DEBUG [RS:1;3ab37fa97a98:36037 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c56f5f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:46:28,070 DEBUG [RS:1;3ab37fa97a98:36037 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3ab37fa97a98:36037 2024-11-19T08:46:28,070 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:46:28,070 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:46:28,070 DEBUG [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T08:46:28,071 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,41505,1732005986486 with port=36037, startcode=1732005987998 2024-11-19T08:46:28,071 DEBUG [RS:1;3ab37fa97a98:36037 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:46:28,073 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36573, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:46:28,073 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41505 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,36037,1732005987998 2024-11-19T08:46:28,073 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41505 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,36037,1732005987998 2024-11-19T08:46:28,075 DEBUG [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882 2024-11-19T08:46:28,075 DEBUG [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36955 2024-11-19T08:46:28,075 DEBUG [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:46:28,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:46:28,087 DEBUG [RS:1;3ab37fa97a98:36037 {}] zookeeper.ZKUtil(111): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,36037,1732005987998 2024-11-19T08:46:28,087 WARN [RS:1;3ab37fa97a98:36037 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T08:46:28,088 INFO [RS:1;3ab37fa97a98:36037 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:46:28,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,36037,1732005987998] 2024-11-19T08:46:28,088 DEBUG [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998 2024-11-19T08:46:28,092 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:46:28,094 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:46:28,094 INFO [RS:1;3ab37fa97a98:36037 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:46:28,094 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:28,095 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:46:28,096 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:46:28,096 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,096 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,096 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,096 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,096 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,096 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,096 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:46:28,096 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,097 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,097 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,097 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,097 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,097 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:46:28,097 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:46:28,097 DEBUG [RS:1;3ab37fa97a98:36037 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:46:28,098 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T08:46:28,098 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,098 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,098 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,098 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,098 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36037,1732005987998-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:46:28,115 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:46:28,115 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,36037,1732005987998-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,115 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,115 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.Replication(171): 3ab37fa97a98,36037,1732005987998 started 2024-11-19T08:46:28,130 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:46:28,130 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,36037,1732005987998, RpcServer on 3ab37fa97a98/172.17.0.2:36037, sessionid=0x101538f93680002 2024-11-19T08:46:28,130 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:46:28,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;3ab37fa97a98:36037,5,FailOnTimeoutGroup] 2024-11-19T08:46:28,130 DEBUG [RS:1;3ab37fa97a98:36037 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,36037,1732005987998 2024-11-19T08:46:28,130 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,36037,1732005987998' 2024-11-19T08:46:28,130 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:46:28,131 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-19T08:46:28,131 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T08:46:28,131 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:46:28,132 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:46:28,132 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:46:28,132 DEBUG [RS:1;3ab37fa97a98:36037 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
3ab37fa97a98,36037,1732005987998 2024-11-19T08:46:28,132 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,36037,1732005987998' 2024-11-19T08:46:28,132 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:46:28,132 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 3ab37fa97a98,41505,1732005986486 2024-11-19T08:46:28,132 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:46:28,132 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f09b82f 2024-11-19T08:46:28,133 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T08:46:28,133 DEBUG [RS:1;3ab37fa97a98:36037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:46:28,133 INFO [RS:1;3ab37fa97a98:36037 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:46:28,133 INFO [RS:1;3ab37fa97a98:36037 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T08:46:28,135 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T08:46:28,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41505 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T08:46:28,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T08:46:28,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41505 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:46:28,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41505 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T08:46:28,139 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T08:46:28,139 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:28,139 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41505 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-19T08:46:28,140 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T08:46:28,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:46:28,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741835_1011 (size=393) 2024-11-19T08:46:28,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741835_1011 (size=393) 2024-11-19T08:46:28,159 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ffe88a4e91dad7fbc582782eb91f58fc, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882 2024-11-19T08:46:28,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40261 is added to blk_1073741836_1012 (size=76) 2024-11-19T08:46:28,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46153 is added to blk_1073741836_1012 (size=76) 2024-11-19T08:46:28,169 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:28,169 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing ffe88a4e91dad7fbc582782eb91f58fc, disabling compactions & flushes 2024-11-19T08:46:28,169 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:28,169 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:28,169 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. after waiting 0 ms 2024-11-19T08:46:28,169 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:28,169 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:28,169 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for ffe88a4e91dad7fbc582782eb91f58fc: Waiting for close lock at 1732005988169Disabling compacts and flushes for region at 1732005988169Disabling writes for close at 1732005988169Writing region close event to WAL at 1732005988169Closed at 1732005988169 2024-11-19T08:46:28,171 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T08:46:28,171 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732005988171"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732005988171"}]},"ts":"1732005988171"} 2024-11-19T08:46:28,174 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T08:46:28,175 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T08:46:28,176 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732005988176"}]},"ts":"1732005988176"} 2024-11-19T08:46:28,179 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-19T08:46:28,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffe88a4e91dad7fbc582782eb91f58fc, ASSIGN}] 2024-11-19T08:46:28,181 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffe88a4e91dad7fbc582782eb91f58fc, ASSIGN 2024-11-19T08:46:28,182 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffe88a4e91dad7fbc582782eb91f58fc, ASSIGN; state=OFFLINE, location=3ab37fa97a98,46031,1732005986641; forceNewPlan=false, retain=false 2024-11-19T08:46:28,236 INFO [RS:1;3ab37fa97a98:36037 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C36037%2C1732005987998, suffix=, logDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998, archiveDir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs, maxLogs=32 2024-11-19T08:46:28,237 INFO [RS:1;3ab37fa97a98:36037 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C36037%2C1732005987998.1732005988237 2024-11-19T08:46:28,245 INFO [RS:1;3ab37fa97a98:36037 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 2024-11-19T08:46:28,246 DEBUG [RS:1;3ab37fa97a98:36037 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38759:38759),(127.0.0.1/127.0.0.1:46125:46125)] 2024-11-19T08:46:28,333 INFO [3ab37fa97a98:41505 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-19T08:46:28,334 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffe88a4e91dad7fbc582782eb91f58fc, regionState=OPENING, regionLocation=3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:28,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffe88a4e91dad7fbc582782eb91f58fc, ASSIGN because future has completed 2024-11-19T08:46:28,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffe88a4e91dad7fbc582782eb91f58fc, server=3ab37fa97a98,46031,1732005986641}] 2024-11-19T08:46:28,497 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:28,498 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ffe88a4e91dad7fbc582782eb91f58fc, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:46:28,499 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,499 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:46:28,499 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,499 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,501 INFO [StoreOpener-ffe88a4e91dad7fbc582782eb91f58fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,503 INFO [StoreOpener-ffe88a4e91dad7fbc582782eb91f58fc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ffe88a4e91dad7fbc582782eb91f58fc columnFamilyName info 2024-11-19T08:46:28,503 DEBUG [StoreOpener-ffe88a4e91dad7fbc582782eb91f58fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:46:28,503 INFO [StoreOpener-ffe88a4e91dad7fbc582782eb91f58fc-1 {}] regionserver.HStore(327): Store=ffe88a4e91dad7fbc582782eb91f58fc/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:46:28,504 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,504 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,505 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,505 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,505 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,507 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,509 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:46:28,510 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ffe88a4e91dad7fbc582782eb91f58fc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709886, jitterRate=-0.09733422100543976}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T08:46:28,510 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:28,511 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ffe88a4e91dad7fbc582782eb91f58fc: Running coprocessor pre-open hook at 1732005988499Writing region info on filesystem at 1732005988499Initializing all the Stores at 1732005988500 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732005988501 (+1 ms)Cleaning up temporary data from old regions at 1732005988505 (+4 ms)Running coprocessor post-open hooks at 1732005988510 (+5 ms)Region opened successfully at 1732005988510 2024-11-19T08:46:28,512 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc., pid=6, masterSystemTime=1732005988491 2024-11-19T08:46:28,514 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:28,514 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:28,515 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffe88a4e91dad7fbc582782eb91f58fc, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,46031,1732005986641 2024-11-19T08:46:28,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffe88a4e91dad7fbc582782eb91f58fc, server=3ab37fa97a98,46031,1732005986641 because future has completed 2024-11-19T08:46:28,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T08:46:28,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ffe88a4e91dad7fbc582782eb91f58fc, server=3ab37fa97a98,46031,1732005986641 in 182 msec 2024-11-19T08:46:28,525 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T08:46:28,525 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffe88a4e91dad7fbc582782eb91f58fc, ASSIGN in 342 msec 2024-11-19T08:46:28,526 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T08:46:28,527 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732005988526"}]},"ts":"1732005988526"} 2024-11-19T08:46:28,529 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-19T08:46:28,531 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T08:46:28,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 395 msec 2024-11-19T08:46:32,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:46:32,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T08:46:32,494 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T08:46:32,494 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-19T08:46:32,495 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:46:32,495 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T08:46:32,496 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T08:46:32,496 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T08:46:33,351 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:46:33,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:33,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:33,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:33,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:46:33,390 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-19T08:46:38,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:46:38,231 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-19T08:46:38,231 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-19T08:46:38,235 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T08:46:38,235 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:38,248 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:38,252 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:38,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:38,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:38,253 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:46:38,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c68a5ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:38,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13bc47a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:38,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21371268{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir/jetty-localhost-44087-hadoop-hdfs-3_4_1-tests_jar-_-any-11679754509532883906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:38,371 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@138afca9{HTTP/1.1, (http/1.1)}{localhost:44087} 2024-11-19T08:46:38,371 INFO [Time-limited test {}] server.Server(415): Started @122899ms 2024-11-19T08:46:38,372 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:46:38,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:38,409 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:38,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:38,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:38,411 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:46:38,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d7a404{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:38,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3727c2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:38,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b41f435{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir/jetty-localhost-39035-hadoop-hdfs-3_4_1-tests_jar-_-any-11716299403240254202/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:38,507 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1fa52141{HTTP/1.1, (http/1.1)}{localhost:39035} 2024-11-19T08:46:38,507 INFO [Time-limited test {}] server.Server(415): Started @123035ms 2024-11-19T08:46:38,508 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:46:38,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:38,549 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:38,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:38,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:38,551 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:46:38,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36e646c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:38,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43081444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:38,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b81e014{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir/jetty-localhost-41449-hadoop-hdfs-3_4_1-tests_jar-_-any-16212649360486231358/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:38,650 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33f41830{HTTP/1.1, (http/1.1)}{localhost:41449} 2024-11-19T08:46:38,650 INFO [Time-limited test {}] server.Server(415): Started @123178ms 2024-11-19T08:46:38,651 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:46:39,305 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:39,305 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:39,324 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:39,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92b9e9ecddf5f06b with lease ID 0x8808a5ba8148fe20: Processing first storage report for DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e from datanode DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:39,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92b9e9ecddf5f06b with lease ID 0x8808a5ba8148fe20: from storage DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e node DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:39,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92b9e9ecddf5f06b with lease ID 0x8808a5ba8148fe20: Processing first storage report for DS-8746514a-c29b-41b3-a962-51c536f06350 from datanode DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:39,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92b9e9ecddf5f06b with lease ID 0x8808a5ba8148fe20: from storage DS-8746514a-c29b-41b3-a962-51c536f06350 node DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:39,544 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data7/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:39,544 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data8/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:39,563 WARN [Thread-830 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:39,566 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac62dea5167c7da2 with lease ID 0x8808a5ba8148fe21: Processing first storage report for DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e from datanode DatanodeRegistration(127.0.0.1:39885, datanodeUuid=59b8e6d0-1923-4aab-975c-98f0dd2ef0b7, infoPort=37175, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:39,566 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac62dea5167c7da2 with lease ID 0x8808a5ba8148fe21: from storage DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e node DatanodeRegistration(127.0.0.1:39885, datanodeUuid=59b8e6d0-1923-4aab-975c-98f0dd2ef0b7, infoPort=37175, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:39,566 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac62dea5167c7da2 with lease ID 0x8808a5ba8148fe21: Processing first storage report for DS-947b50fc-7c8e-4a76-9358-2087dd32f57b from datanode DatanodeRegistration(127.0.0.1:39885, datanodeUuid=59b8e6d0-1923-4aab-975c-98f0dd2ef0b7, infoPort=37175, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:39,566 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac62dea5167c7da2 with lease ID 0x8808a5ba8148fe21: from storage DS-947b50fc-7c8e-4a76-9358-2087dd32f57b node DatanodeRegistration(127.0.0.1:39885, datanodeUuid=59b8e6d0-1923-4aab-975c-98f0dd2ef0b7, infoPort=37175, infoSecurePort=0, ipcPort=46381, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:39,690 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data9/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:39,690 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data10/current/BP-571785619-172.17.0.2-1732005984737/current, will proceed with Du for space computation calculation, 2024-11-19T08:46:39,710 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:39,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6885738fbb246d74 with lease ID 0x8808a5ba8148fe22: Processing first storage report for DS-0662beeb-0540-4986-abea-05906b0a61f8 from datanode DatanodeRegistration(127.0.0.1:43925, datanodeUuid=c272d492-67fc-4e0b-88fe-ab1ee6092ebb, infoPort=33103, infoSecurePort=0, ipcPort=44521, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:39,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6885738fbb246d74 with lease ID 0x8808a5ba8148fe22: from storage DS-0662beeb-0540-4986-abea-05906b0a61f8 node DatanodeRegistration(127.0.0.1:43925, datanodeUuid=c272d492-67fc-4e0b-88fe-ab1ee6092ebb, infoPort=33103, infoSecurePort=0, ipcPort=44521, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:39,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6885738fbb246d74 with lease ID 0x8808a5ba8148fe22: Processing first storage report for DS-d6c0ee2d-470b-452b-809a-3abb29b5d13a from datanode DatanodeRegistration(127.0.0.1:43925, datanodeUuid=c272d492-67fc-4e0b-88fe-ab1ee6092ebb, infoPort=33103, infoSecurePort=0, ipcPort=44521, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737) 2024-11-19T08:46:39,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6885738fbb246d74 with lease ID 0x8808a5ba8148fe22: from storage DS-d6c0ee2d-470b-452b-809a-3abb29b5d13a node DatanodeRegistration(127.0.0.1:43925, datanodeUuid=c272d492-67fc-4e0b-88fe-ab1ee6092ebb, infoPort=33103, infoSecurePort=0, ipcPort=44521, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:39,796 WARN [ResponseProcessor for block BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,796 WARN [ResponseProcessor for block BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008 java.io.IOException: Bad response ERROR for BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008 from datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:39,797 WARN [ResponseProcessor for block BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,798 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 block BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:39,798 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta block BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:39,798 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 block BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:39,798 WARN [ResponseProcessor for block BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:39,798 WARN [PacketResponder: BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40261] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,798 WARN [PacketResponder: BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40261] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,798 WARN [PacketResponder: BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40261] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,800 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 block BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:39,801 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:60392 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:46153:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60392 dst: /127.0.0.1:46153 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,801 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-977694441_22 at /127.0.0.1:60418 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46153:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60418 dst: /127.0.0.1:46153 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,801 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:35642 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40261:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35642 dst: /127.0.0.1:40261 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,801 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:35656 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:40261:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35656 dst: /127.0.0.1:40261 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,801 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:35680 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40261:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35680 dst: /127.0.0.1:40261 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:60400 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46153:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60400 dst: /127.0.0.1:46153 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ab51772{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:39,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:60380 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46153:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60380 dst: /127.0.0.1:46153 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:46:39,803 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@596c0e8d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:39,803 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:39,803 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@779704d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:39,803 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70de2eeb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:39,805 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:39,805 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T08:46:39,805 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571785619-172.17.0.2-1732005984737 (Datanode Uuid f0aa2952-855c-402a-84ee-54458d718d25) service to localhost/127.0.0.1:36955 2024-11-19T08:46:39,805 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:39,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data3/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:39,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data4/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:39,806 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:39,806 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 block BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,806 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-977694441_22 at /127.0.0.1:35696 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40261:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35696 dst: /127.0.0.1:40261 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,807 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@38cdedad {}] datanode.DataXceiver(331): 127.0.0.1:46153:DataXceiver error processing unknown operation src: /127.0.0.1:59082 dst: /127.0.0.1:46153 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:39,807 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 block BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,812 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 block BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,812 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta block BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:39,812 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10f6ef1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:39,812 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c5ffb55{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:39,812 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:39,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@225c4391{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:39,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7359eab1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:39,814 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:39,814 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571785619-172.17.0.2-1732005984737 (Datanode Uuid ddb8880e-f7de-4466-b1b2-c9a2b0e56f7a) service to localhost/127.0.0.1:36955 2024-11-19T08:46:39,814 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T08:46:39,814 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:39,814 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data1/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:39,814 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data2/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:39,814 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:39,818 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc., hostname=3ab37fa97a98,46031,1732005986641, seqNum=2] 2024-11-19T08:46:39,819 ERROR [FSHLog-0-hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882-prefix:3ab37fa97a98,46031,1732005986641 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,820 WARN [FSHLog-0-hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882-prefix:3ab37fa97a98,46031,1732005986641 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,820 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C46031%2C1732005986641:(num 1732005987295) roll requested 2024-11-19T08:46:39,820 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.1732005999820 2024-11-19T08:46:39,836 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:39,836 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:39,836 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:39,836 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:39,836 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:39,836 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005999820 2024-11-19T08:46:39,837 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,837 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:39,838 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-19T08:46:39,838 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-19T08:46:39,838 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 2024-11-19T08:46:39,840 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37175:37175),(127.0.0.1/127.0.0.1:33103:33103)] 2024-11-19T08:46:39,840 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 is not closed yet, will try archiving it next time 2024-11-19T08:46:39,841 WARN [IPC Server handler 3 on default port 36955 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741832_1008 2024-11-19T08:46:39,845 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 after 5ms 2024-11-19T08:46:40,098 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:40,758 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:41,840 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:41,841 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005999820 2024-11-19T08:46:41,842 WARN [ResponseProcessor for block BP-571785619-172.17.0.2-1732005984737:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-571785619-172.17.0.2-1732005984737:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:41,842 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005999820 block BP-571785619-172.17.0.2-1732005984737:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 
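The Close-WAL-Writer-0 entries above show the old WAL being handed to HDFS lease recovery before it can be archived (attempt=0 fails after 5ms and attempt=1 is still failing roughly 4 seconds later). A minimal sketch of that recover-and-retry pattern, assuming a plain DistributedFileSystem handle and an illustrative backoff rather than RecoverLeaseFSUtils' exact schedule:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class WalLeaseRecoverySketch {
      // Hedged sketch: ask the NameNode to recover the lease on a WAL file until it
      // reports the file closed. The path and backoff are illustrative, not the test's values.
      static void recoverWalLease(DistributedFileSystem dfs, Path wal) throws IOException, InterruptedException {
        long backoffMs = 1000L; // assumed backoff, not HBase's exact schedule
        for (int attempt = 0; attempt < 5; attempt++) {
          if (dfs.recoverLease(wal)) { // returns true once the file is closed
            return;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
        throw new IOException("lease on " + wal + " was not recovered");
      }
    }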
2024-11-19T08:46:41,843 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:45732 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39885:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45732 dst: /127.0.0.1:39885 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:41,843 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:44868 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:43925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44868 dst: /127.0.0.1:43925 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:41,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b41f435{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:41,844 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fa52141{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:41,844 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:41,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3727c2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:41,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d7a404{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:41,846 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:46:41,846 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:41,846 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:41,846 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571785619-172.17.0.2-1732005984737 (Datanode Uuid 59b8e6d0-1923-4aab-975c-98f0dd2ef0b7) service to localhost/127.0.0.1:36955 2024-11-19T08:46:41,846 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data7/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:41,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data8/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:41,847 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:42,099 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:42,758 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:43,840 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:43,841 WARN [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]] 2024-11-19T08:46:43,842 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C46031%2C1732005986641:(num 1732005999820) roll requested 2024-11-19T08:46:43,842 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.1732006003842 2024-11-19T08:46:43,847 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 after 4009ms 2024-11-19T08:46:43,850 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39885 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:43,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:49534 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data10]'}, localName='127.0.0.1:43925', datanodeUuid='c272d492-67fc-4e0b-88fe-ab1ee6092ebb', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741839_1021 to mirror 127.0.0.1:39885 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:43,850 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:43,850 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741839_1021 2024-11-19T08:46:43,850 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:49534 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T08:46:43,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:49534 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49534 dst: /127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
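The "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" warning above is FSHLog reacting to a degraded pipeline by forcing a roll. The replica threshold and the cap on consecutive low-replication rolls are configurable; a hedged sketch using the property names as I understand them (confirm against the HBase release in use):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class WalLowReplicationConfigSketch {
      // Assumed property names for FSHLog's low-replication roll handling; treat them as
      // illustrative and verify against the HBase documentation for your version.
      static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2); // roll once live replicas drop below this
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5); // stop forcing rolls after this many in a row
        return conf;
      }
    }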
2024-11-19T08:46:43,853 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:43,855 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T08:46:43,858 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46153 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:43,858 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:49536 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data10]'}, localName='127.0.0.1:43925', datanodeUuid='c272d492-67fc-4e0b-88fe-ab1ee6092ebb', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741840_1022 to mirror 127.0.0.1:46153 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:43,858 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 
2024-11-19T08:46:43,858 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741840_1022 2024-11-19T08:46:43,858 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:49536 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T08:46:43,858 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:49536 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:43925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49536 dst: /127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:43,859 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:43,860 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:43,860 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 
2024-11-19T08:46:43,860 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741841_1023 2024-11-19T08:46:43,861 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:43,867 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:43,868 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:43,868 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:43,868 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:43,868 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:43,868 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005999820 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006003842 2024-11-19T08:46:43,869 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33103:33103),(127.0.0.1/127.0.0.1:32841:32841)] 2024-11-19T08:46:43,869 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 is not closed yet, will try archiving it next time 2024-11-19T08:46:43,869 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005999820 is not closed yet, will try archiving it next time 2024-11-19T08:46:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43925 is added to blk_1073741838_1020 (size=3600) 2024-11-19T08:46:44,099 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:44,271 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 is not closed yet, will try archiving it next time 2024-11-19T08:46:44,759 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:45,858 WARN [ResponseProcessor for block BP-571785619-172.17.0.2-1732005984737:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-571785619-172.17.0.2-1732005984737:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:45,859 WARN [DataStreamer for file /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006003842 block BP-571785619-172.17.0.2-1732005984737:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:45,859 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:49552 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49552 dst: /127.0.0.1:43925 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:45,859 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46546 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46546 dst: /127.0.0.1:33279 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:45,869 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
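The recurring "All datanodes [...] are bad. Aborting..." failures come from DataStreamer giving up on pipeline recovery once it has no replacement node left to try. How aggressively the HDFS client swaps replacement datanodes into an existing write pipeline is governed by the replace-datanode-on-failure client settings; a hedged sketch (standard HDFS client property names, but defaults should be checked for Hadoop 3.4.x):

    import org.apache.hadoop.conf.Configuration;

    final class PipelineRecoveryConfigSketch {
      // Hedged sketch of the HDFS client knobs that control whether DataStreamer may
      // replace a failed datanode in an existing write pipeline instead of aborting.
      static Configuration build() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT"); // NEVER | DEFAULT | ALWAYS
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true); // keep writing even if replacement fails
        return conf;
      }
    }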
2024-11-19T08:46:45,869 WARN [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]] 2024-11-19T08:46:45,869 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C46031%2C1732005986641:(num 1732006003842) roll requested 2024-11-19T08:46:45,870 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.1732006005869 2024-11-19T08:46:45,872 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:45,873 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:45,873 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741843_1026 2024-11-19T08:46:45,873 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:45,875 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:45,875 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:45,875 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741844_1027 2024-11-19T08:46:45,876 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:45,877 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:45,877 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:45,877 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741845_1028 2024-11-19T08:46:45,878 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:45,879 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:45,879 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 2024-11-19T08:46:45,879 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741846_1029 2024-11-19T08:46:45,880 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:45,881 WARN [IPC Server handler 3 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T08:46:45,881 WARN [IPC Server handler 3 on default port 36955 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T08:46:45,881 WARN [IPC Server handler 3 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T08:46:45,884 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:45,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:45,884 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:45,884 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:45,884 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:45,884 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006003842 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006005869 2024-11-19T08:46:45,885 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32841:32841)] 2024-11-19T08:46:45,885 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 is not closed yet, will try archiving it next time 2024-11-19T08:46:45,885 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006003842 is not closed yet, will try archiving it next time 2024-11-19T08:46:45,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741842_1025 (size=93) 2024-11-19T08:46:45,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b81e014{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:45,899 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33f41830{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:46:45,899 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:46:45,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43081444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:46:45,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36e646c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,STOPPED} 2024-11-19T08:46:45,901 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:46:45,901 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:46:45,901 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571785619-172.17.0.2-1732005984737 (Datanode Uuid c272d492-67fc-4e0b-88fe-ab1ee6092ebb) service to localhost/127.0.0.1:36955 2024-11-19T08:46:45,901 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:46:45,902 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data9/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:45,902 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data10/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:46:45,902 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:46:45,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46031 {}] regionserver.HRegion(8855): Flush requested on ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:45,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffe88a4e91dad7fbc582782eb91f58fc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:46:45,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/cd6a7701162b494d946f5e5b1a27b1af is 1080, key is row0002/info:/1732006001849/Put/seqid=0 2024-11-19T08:46:45,935 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
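Everything in this stretch is driven by the test tearing datanodes down underneath a live WAL writer (the "Ending block pool service" lines) and then triggering a memstore flush against the surviving pipeline. A minimal sketch of that kill-and-restart pattern against MiniDFSCluster, assuming a cluster handle supplied by the surrounding test harness:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class DatanodeDeathSketch {
      // Hedged sketch: stop one datanode under an open writer so the client has to
      // notice the dead pipeline member, then bring the node back. The 'cluster'
      // handle is assumed to come from the test harness, not created here.
      static void bounceDataNode(MiniDFSCluster cluster) throws Exception {
        MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);
        // ... append/sync against the open WAL here and expect pipeline recovery or a roll ...
        cluster.restartDataNode(stopped, true); // re-register with the NameNode, keeping the same port
      }
    }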
2024-11-19T08:46:45,935 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:45,935 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741848_1031 2024-11-19T08:46:45,936 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:45,937 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:45,937 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:45,937 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741849_1032 2024-11-19T08:46:45,938 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:45,940 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40261 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:45,940 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46562 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741850_1033 to mirror 127.0.0.1:40261 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:45,940 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:45,940 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741850_1033 2024-11-19T08:46:45,940 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46562 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:46:45,940 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46562 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46562 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:45,940 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:45,941 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:45,942 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 
2024-11-19T08:46:45,942 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741851_1034 2024-11-19T08:46:45,942 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:45,943 WARN [IPC Server handler 1 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T08:46:45,943 WARN [IPC Server handler 1 on default port 36955 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T08:46:45,943 WARN [IPC Server handler 1 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T08:46:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741852_1035 (size=10347) 2024-11-19T08:46:46,100 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:46,287 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 is not closed yet, will try archiving it next time 2024-11-19T08:46:46,288 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006003842 to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs/3ab37fa97a98%2C46031%2C1732005986641.1732006003842 2024-11-19T08:46:46,339 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9512b21[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741852_1035 to 127.0.0.1:46153 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:46,339 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@cd229e6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741842_1025 to 127.0.0.1:43925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:46:46,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/cd6a7701162b494d946f5e5b1a27b1af 2024-11-19T08:46:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/cd6a7701162b494d946f5e5b1a27b1af as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/cd6a7701162b494d946f5e5b1a27b1af 2024-11-19T08:46:46,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/cd6a7701162b494d946f5e5b1a27b1af, entries=5, sequenceid=11, filesize=10.1 K 2024-11-19T08:46:46,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for ffe88a4e91dad7fbc582782eb91f58fc in 454ms, sequenceid=11, compaction requested=false 2024-11-19T08:46:46,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffe88a4e91dad7fbc582782eb91f58fc: 2024-11-19T08:46:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46031 {}] regionserver.HRegion(8855): Flush requested on ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:46,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffe88a4e91dad7fbc582782eb91f58fc 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-19T08:46:46,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/4cb4d7c0c4ae41bfb2049fab45e72f8b is 1080, key is row0007/info:/1732006005915/Put/seqid=0 2024-11-19T08:46:46,552 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:46,552 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:46,552 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741853_1036 2024-11-19T08:46:46,552 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:46,554 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:46,554 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:46,554 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741854_1037 2024-11-19T08:46:46,554 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:46,555 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:46,556 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 2024-11-19T08:46:46,556 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741855_1038 2024-11-19T08:46:46,556 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:46,557 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:46,557 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 
2024-11-19T08:46:46,557 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741856_1039 2024-11-19T08:46:46,558 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:46,558 WARN [IPC Server handler 3 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T08:46:46,558 WARN [IPC Server handler 3 on default port 36955 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T08:46:46,559 WARN [IPC Server handler 3 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T08:46:46,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741857_1040 (size=12506) 2024-11-19T08:46:46,759 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:46,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/4cb4d7c0c4ae41bfb2049fab45e72f8b 2024-11-19T08:46:46,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/4cb4d7c0c4ae41bfb2049fab45e72f8b as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b 2024-11-19T08:46:46,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b, entries=7, sequenceid=24, filesize=12.2 K 2024-11-19T08:46:46,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for ffe88a4e91dad7fbc582782eb91f58fc in 439ms, sequenceid=24, compaction requested=false 2024-11-19T08:46:46,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffe88a4e91dad7fbc582782eb91f58fc: 2024-11-19T08:46:46,981 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-19T08:46:46,981 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:46:46,981 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b because midkey is the same as first or last row 2024-11-19T08:46:47,885 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:47,886 WARN [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]] 2024-11-19T08:46:47,886 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C46031%2C1732005986641:(num 1732006005869) roll requested 2024-11-19T08:46:47,886 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.1732006007886 2024-11-19T08:46:47,891 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:47,891 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 2024-11-19T08:46:47,891 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741858_1041 2024-11-19T08:46:47,892 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:47,894 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:47,895 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:47,895 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741859_1042 2024-11-19T08:46:47,896 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:47,899 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39885 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:47,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46600 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741860_1043 to mirror 127.0.0.1:39885 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:46:47,899 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:47,899 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741860_1043 2024-11-19T08:46:47,899 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46600 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T08:46:47,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46600 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46600 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:47,901 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:47,904 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:47,904 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46610 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741861_1044 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:47,905 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:47,905 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741861_1044 2024-11-19T08:46:47,905 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46610 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T08:46:47,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46610 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46610 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:47,906 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:47,906 WARN [IPC Server handler 0 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T08:46:47,907 WARN [IPC Server handler 0 on default port 36955 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T08:46:47,907 WARN [IPC Server handler 0 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T08:46:47,909 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:47,909 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:47,910 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:47,910 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:47,910 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:47,910 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006005869 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006007886 2024-11-19T08:46:47,911 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32841:32841)] 2024-11-19T08:46:47,911 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 is not closed yet, will try archiving it next time 
2024-11-19T08:46:47,911 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006005869 is not closed yet, will try archiving it next time 2024-11-19T08:46:47,911 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005999820 to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs/3ab37fa97a98%2C46031%2C1732005986641.1732005999820 2024-11-19T08:46:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741847_1030 (size=24823) 2024-11-19T08:46:47,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46031 {}] regionserver.HRegion(8855): Flush requested on ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:47,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffe88a4e91dad7fbc582782eb91f58fc 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T08:46:47,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/57930dcce2b14645976e2b5f6cd244e1 is 1079, key is tmprow/info:/1732006007969/Put/seqid=0 2024-11-19T08:46:47,980 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39885 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:47,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46634 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741863_1046 to mirror 127.0.0.1:39885 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:47,980 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:47,980 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741863_1046 2024-11-19T08:46:47,981 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46634 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:46:47,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46634 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46634 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:47,981 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:47,983 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:47,983 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:47,983 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741864_1047 2024-11-19T08:46:47,983 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:47,985 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:47,985 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 
2024-11-19T08:46:47,985 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741865_1048 2024-11-19T08:46:47,985 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:47,986 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:47,987 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 2024-11-19T08:46:47,987 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741866_1049 2024-11-19T08:46:47,987 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:47,988 WARN [IPC Server handler 4 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T08:46:47,988 WARN [IPC Server handler 4 on default port 36955 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T08:46:47,988 WARN [IPC Server handler 4 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T08:46:47,992 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741867_1050 (size=6027) 2024-11-19T08:46:48,100 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:48,312 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 is not closed yet, will try archiving it next time 2024-11-19T08:46:48,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/57930dcce2b14645976e2b5f6cd244e1 2024-11-19T08:46:48,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/57930dcce2b14645976e2b5f6cd244e1 as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/57930dcce2b14645976e2b5f6cd244e1 2024-11-19T08:46:48,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/57930dcce2b14645976e2b5f6cd244e1, entries=1, sequenceid=34, filesize=5.9 K 2024-11-19T08:46:48,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ffe88a4e91dad7fbc582782eb91f58fc in 441ms, sequenceid=34, compaction requested=true 2024-11-19T08:46:48,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffe88a4e91dad7fbc582782eb91f58fc: 2024-11-19T08:46:48,411 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-19T08:46:48,411 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:46:48,411 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b 
because midkey is the same as first or last row 2024-11-19T08:46:48,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ffe88a4e91dad7fbc582782eb91f58fc:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:46:48,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:46:48,412 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:46:48,413 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:46:48,413 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HStore(1541): ffe88a4e91dad7fbc582782eb91f58fc/info is initiating minor compaction (all files) 2024-11-19T08:46:48,413 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ffe88a4e91dad7fbc582782eb91f58fc/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:46:48,414 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/cd6a7701162b494d946f5e5b1a27b1af, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/57930dcce2b14645976e2b5f6cd244e1] into tmpdir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp, totalSize=28.2 K 2024-11-19T08:46:48,414 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd6a7701162b494d946f5e5b1a27b1af, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732006001849 2024-11-19T08:46:48,415 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4cb4d7c0c4ae41bfb2049fab45e72f8b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732006005915 2024-11-19T08:46:48,415 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57930dcce2b14645976e2b5f6cd244e1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732006007969 2024-11-19T08:46:48,431 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ffe88a4e91dad7fbc582782eb91f58fc#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:46:48,432 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/a66ff0ec34864cfb83489f663f522490 is 1080, key is row0002/info:/1732006001849/Put/seqid=0 2024-11-19T08:46:48,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46660 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741868_1051 to mirror 127.0.0.1:46153 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:48,435 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46153 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:48,435 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46660 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-19T08:46:48,435 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 2024-11-19T08:46:48,435 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741868_1051 2024-11-19T08:46:48,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46660 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46660 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:48,436 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:48,437 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:48,438 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK], DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:48,438 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741869_1052 2024-11-19T08:46:48,438 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:48,441 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40261 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:48,441 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46674 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741870_1053 to mirror 127.0.0.1:40261 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:46:48,441 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:48,441 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741870_1053 2024-11-19T08:46:48,441 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46674 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:46:48,441 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46674 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46674 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:48,441 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:48,443 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:48,443 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:48,443 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741871_1054 2024-11-19T08:46:48,444 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:48,445 WARN [IPC Server handler 0 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T08:46:48,445 WARN [IPC Server handler 0 on default port 36955 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T08:46:48,445 WARN [IPC Server handler 0 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T08:46:48,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741872_1055 (size=17994) 2024-11-19T08:46:48,760 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
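The BlockPlacementPolicyDefault warning above states that it will only explain why replica placement failed if DEBUG logging is enabled on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology. A minimal sketch of the extra entries, assuming the Log4j 2 properties-style configuration this test harness loads (the logger key names "blockplacement" and "nettopology" are illustrative, not taken from the actual file):

    # Illustrative log4j2 properties entries: enable replica-placement diagnostics
    logger.blockplacement.name = org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy
    logger.blockplacement.level = DEBUG
    logger.nettopology.name = org.apache.hadoop.net.NetworkTopology
    logger.nettopology.level = DEBUG
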
2024-11-19T08:46:48,867 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/a66ff0ec34864cfb83489f663f522490 as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 2024-11-19T08:46:48,876 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ffe88a4e91dad7fbc582782eb91f58fc/info of ffe88a4e91dad7fbc582782eb91f58fc into a66ff0ec34864cfb83489f663f522490(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ffe88a4e91dad7fbc582782eb91f58fc: 2024-11-19T08:46:48,876 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc., storeName=ffe88a4e91dad7fbc582782eb91f58fc/info, priority=13, startTime=1732006008411; duration=0sec 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 because midkey is the same as first or last row 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 because midkey is the same as first or last row 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 because midkey is the same as first or last row 2024-11-19T08:46:48,876 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:46:48,877 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ffe88a4e91dad7fbc582782eb91f58fc:info 2024-11-19T08:46:49,330 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@cd229e6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741857_1040 to 127.0.0.1:46153 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:49,330 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9512b21[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741847_1030 to 127.0.0.1:40261 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:46:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46031 {}] regionserver.HRegion(8855): Flush requested on ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:46:49,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffe88a4e91dad7fbc582782eb91f58fc 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T08:46:49,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/2a895e5f2cc0412bb81c26d8bde9d70f is 1079, key is tmprow/info:/1732006009395/Put/seqid=0 2024-11-19T08:46:49,406 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:49,406 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:49,406 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741873_1056 2024-11-19T08:46:49,407 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:49,408 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:49,408 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]) is bad. 2024-11-19T08:46:49,408 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741874_1057 2024-11-19T08:46:49,409 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40261,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK] 2024-11-19T08:46:49,410 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:49,410 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:49,410 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741875_1058 2024-11-19T08:46:49,410 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:49,413 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46153 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:49,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46698 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741876_1059 to mirror 127.0.0.1:46153 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:49,413 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 2024-11-19T08:46:49,413 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741876_1059 2024-11-19T08:46:49,413 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46698 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:46:49,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:46698 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46698 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:49,414 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:49,415 WARN [IPC Server handler 4 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T08:46:49,415 WARN [IPC Server handler 4 on default port 36955 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T08:46:49,415 WARN [IPC Server handler 4 on default port 36955 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T08:46:49,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741877_1060 (size=6027) 2024-11-19T08:46:49,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/2a895e5f2cc0412bb81c26d8bde9d70f 2024-11-19T08:46:49,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/2a895e5f2cc0412bb81c26d8bde9d70f as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/2a895e5f2cc0412bb81c26d8bde9d70f 2024-11-19T08:46:49,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/2a895e5f2cc0412bb81c26d8bde9d70f, entries=1, sequenceid=45, filesize=5.9 K 2024-11-19T08:46:49,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ffe88a4e91dad7fbc582782eb91f58fc in 438ms, sequenceid=45, compaction requested=false 2024-11-19T08:46:49,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffe88a4e91dad7fbc582782eb91f58fc: 2024-11-19T08:46:49,836 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-19T08:46:49,836 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:46:49,836 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 because midkey is the same as first or last row 2024-11-19T08:46:49,911 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:49,912 WARN [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-19T08:46:50,017 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:46:50,020 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:46:50,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:46:50,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:46:50,021 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:46:50,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@658ca166{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:46:50,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23e1cdda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:46:50,100 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:50,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7eb80544{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/java.io.tmpdir/jetty-localhost-44279-hadoop-hdfs-3_4_1-tests_jar-_-any-16326763496791304143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:46:50,114 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@216ea3a3{HTTP/1.1, (http/1.1)}{localhost:44279} 2024-11-19T08:46:50,114 INFO [Time-limited test {}] server.Server(415): Started @134642ms 2024-11-19T08:46:50,116 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
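The AuthenticationFilter warning above ("Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret") means the restarted HTTP server falls back to a random signing secret. If a fixed secret were wanted, the standard Hadoop property for this is hadoop.http.authentication.signature.secret.file in core-site.xml; a hedged sketch, reusing only the path already named in the warning:

    <!-- core-site.xml sketch: point the HTTP auth filter at a readable secret file -->
    <property>
      <name>hadoop.http.authentication.signature.secret.file</name>
      <value>/home/jenkins/hadoop-http-auth-signature-secret</value>
    </property>
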
2024-11-19T08:46:50,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9512b21[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741867_1050 to 127.0.0.1:39885 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:50,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@cd229e6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741872_1055 to 127.0.0.1:39885 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:50,494 WARN [Thread-977 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:46:50,501 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3434a418b4d75a9c with lease ID 0x8808a5ba8148fe23: from storage DS-af2ac2d2-d5bd-42f8-adc7-234974203b47 node DatanodeRegistration(127.0.0.1:44377, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=42595, infoSecurePort=0, ipcPort=33431, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:46:50,501 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3434a418b4d75a9c with lease ID 0x8808a5ba8148fe23: from storage DS-4639319f-0d0f-4458-a7cc-084a95c0d979 node DatanodeRegistration(127.0.0.1:44377, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=42595, infoSecurePort=0, ipcPort=33431, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:46:50,760 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:51,912 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:52,101 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
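The DirectoryScanner warning above reports that dfs.datanode.directoryscan.throttle.limit.ms.per.sec was set above 1000 ms/sec and is therefore ignored (treated as the default of -1, i.e. unthrottled). A hedged hdfs-site.xml sketch with a value the warning would accept (500 ms of scanner work per second is an illustrative choice, not taken from the test configuration):

    <!-- hdfs-site.xml sketch: throttle the DataNode directory scanner -->
    <property>
      <name>dfs.datanode.directoryscan.throttle.limit.ms.per.sec</name>
      <value>500</value>
    </property>
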
2024-11-19T08:46:52,329 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9512b21[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33279, datanodeUuid=c2c85126-7588-4581-b1ef-3cf98a00816f, infoPort=32841, infoSecurePort=0, ipcPort=46171, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741877_1060 to 127.0.0.1:39885 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:52,761 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:53,913 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:54,101 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:54,761 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:55,913 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:56,101 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:56,461 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T08:46:56,762 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:56,990 ERROR [FSHLog-0-hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData-prefix:3ab37fa97a98,41505,1732005986486 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:56,990 WARN [FSHLog-0-hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData-prefix:3ab37fa97a98,41505,1732005986486 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:56,990 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C41505%2C1732005986486:(num 1732005986781) roll requested 2024-11-19T08:46:56,991 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C41505%2C1732005986486.1732006016991 2024-11-19T08:46:56,996 WARN [Thread-997 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:56,996 WARN [Thread-997 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]) is bad. 2024-11-19T08:46:56,996 WARN [Thread-997 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741878_1061 2024-11-19T08:46:56,997 WARN [Thread-997 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK] 2024-11-19T08:46:57,001 WARN [Thread-997 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39885 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:57,001 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:48148 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741879_1062 to mirror 127.0.0.1:39885 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:57,002 WARN [Thread-997 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK]) is bad. 2024-11-19T08:46:57,002 WARN [Thread-997 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741879_1062 2024-11-19T08:46:57,002 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:48148 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T08:46:57,002 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:48148 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48148 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:57,003 WARN [Thread-997 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39885,DS-e388e9a5-0ff7-41dd-9358-633be1e46f2e,DISK] 2024-11-19T08:46:57,007 WARN [Thread-997 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:57,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:48154 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741880_1063 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:57,007 WARN [Thread-997 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:46:57,007 WARN [Thread-997 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741880_1063 2024-11-19T08:46:57,007 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:48154 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T08:46:57,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:48154 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48154 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:46:57,008 WARN [Thread-997 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:46:57,012 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:57,013 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:57,013 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:57,013 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:57,013 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:46:57,013 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732006016991 2024-11-19T08:46:57,013 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:57,014 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:46:57,014 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 2024-11-19T08:46:57,014 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32841:32841),(127.0.0.1/127.0.0.1:42595:42595)] 2024-11-19T08:46:57,014 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 is not closed yet, will try archiving it next time 2024-11-19T08:46:57,014 WARN [IPC Server handler 3 on default port 36955 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 has not been closed. Lease recovery is in progress. RecoveryId = 1065 for block blk_1073741830_1006 2024-11-19T08:46:57,014 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 after 0ms 2024-11-19T08:46:57,914 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:58,102 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:46:59,914 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:00,102 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:00,515 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@55b0f970 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-571785619-172.17.0.2-1732005984737:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:46153,null,null]) java.net.ConnectException: Call From 3ab37fa97a98/172.17.0.2 to localhost:35143 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T08:47:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741832_1019 (size=455) 2024-11-19T08:47:00,869 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs/3ab37fa97a98%2C46031%2C1732005986641.1732005987295 2024-11-19T08:47:00,872 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006005869 to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs/3ab37fa97a98%2C46031%2C1732005986641.1732006005869 2024-11-19T08:47:01,016 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/WALs/3ab37fa97a98,41505,1732005986486/3ab37fa97a98%2C41505%2C1732005986486.1732005986781 after 4002ms 2024-11-19T08:47:01,501 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@64af6b22[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44377, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=42595, infoSecurePort=0, ipcPort=33431, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741835_1011 to 127.0.0.1:39885 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:01,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741833_1009 (size=32) 2024-11-19T08:47:01,915 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:02,103 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:02,500 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@64af6b22[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44377, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=42595, infoSecurePort=0, ipcPort=33431, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741829_1005 to 127.0.0.1:39885 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:02,500 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@673608d0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44377, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=42595, infoSecurePort=0, ipcPort=33431, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741831_1007 to 127.0.0.1:43925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:03,915 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:04,104 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:04,500 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@673608d0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44377, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=42595, infoSecurePort=0, ipcPort=33431, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741826_1002 to 127.0.0.1:39885 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:04,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741832_1019 (size=455) 2024-11-19T08:47:05,814 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.1732006025814 2024-11-19T08:47:05,821 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:05,821 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:05,821 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:05,821 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:05,821 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:05,822 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006007886 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006025814 2024-11-19T08:47:05,822 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42595:42595),(127.0.0.1/127.0.0.1:32841:32841)] 2024-11-19T08:47:05,822 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006007886 is not closed yet, will try archiving it next time 2024-11-19T08:47:05,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741862_1045 (size=13591) 2024-11-19T08:47:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46031 {}] regionserver.HRegion(8855): Flush requested on ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:47:05,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffe88a4e91dad7fbc582782eb91f58fc 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T08:47:05,838 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/5c2aa65b493b408992b19f8cc4a2542b is 1080, key is row0013/info:/1732006025824/Put/seqid=0 2024-11-19T08:47:05,841 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:05,841 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:45842 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741883_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741883_1067 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:05,841 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 
2024-11-19T08:47:05,842 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741883_1067 2024-11-19T08:47:05,842 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:45842 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741883_1067] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:47:05,842 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:45842 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741883_1067] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45842 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:05,842 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:05,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741884_1068 (size=11421) 2024-11-19T08:47:05,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741884_1068 (size=11421) 2024-11-19T08:47:05,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/5c2aa65b493b408992b19f8cc4a2542b 2024-11-19T08:47:05,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/5c2aa65b493b408992b19f8cc4a2542b as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/5c2aa65b493b408992b19f8cc4a2542b 2024-11-19T08:47:05,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/5c2aa65b493b408992b19f8cc4a2542b, entries=6, sequenceid=55, filesize=11.2 K 2024-11-19T08:47:05,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for ffe88a4e91dad7fbc582782eb91f58fc in 32ms, sequenceid=55, compaction requested=true 2024-11-19T08:47:05,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffe88a4e91dad7fbc582782eb91f58fc: 2024-11-19T08:47:05,865 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-19T08:47:05,865 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:47:05,865 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 because midkey is the same as first or last row 2024-11-19T08:47:05,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ffe88a4e91dad7fbc582782eb91f58fc:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:47:05,866 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:47:05,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:47:05,867 DEBUG 
[RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:47:05,867 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HStore(1541): ffe88a4e91dad7fbc582782eb91f58fc/info is initiating minor compaction (all files) 2024-11-19T08:47:05,867 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ffe88a4e91dad7fbc582782eb91f58fc/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:47:05,867 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/2a895e5f2cc0412bb81c26d8bde9d70f, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/5c2aa65b493b408992b19f8cc4a2542b] into tmpdir=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp, totalSize=34.6 K 2024-11-19T08:47:05,868 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.Compactor(225): Compacting a66ff0ec34864cfb83489f663f522490, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732006001849 2024-11-19T08:47:05,868 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2a895e5f2cc0412bb81c26d8bde9d70f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732006009395 2024-11-19T08:47:05,869 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c2aa65b493b408992b19f8cc4a2542b, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732006009802 2024-11-19T08:47:05,885 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ffe88a4e91dad7fbc582782eb91f58fc#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:47:05,886 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/b51e027fc2bc4da18e6b4b8961d7137f is 1080, key is row0002/info:/1732006001849/Put/seqid=0 2024-11-19T08:47:05,887 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:05,888 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:47:05,888 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741885_1069 2024-11-19T08:47:05,888 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:05,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741886_1070 (size=23502) 2024-11-19T08:47:05,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741886_1070 (size=23502) 2024-11-19T08:47:05,906 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/b51e027fc2bc4da18e6b4b8961d7137f as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/b51e027fc2bc4da18e6b4b8961d7137f 2024-11-19T08:47:05,914 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ffe88a4e91dad7fbc582782eb91f58fc/info of ffe88a4e91dad7fbc582782eb91f58fc into b51e027fc2bc4da18e6b4b8961d7137f(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
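The three inputs selected above are the 17994-, 6027- and 11421-byte files that reappear later when the store is closed, which is exactly the 35442 bytes the exploring policy reports (17.6 K + 5.9 K + 11.2 K ≈ 34.6 K), and they are rewritten into a single 23.0 K file. A rough sketch of a size-ratio selection check is shown below; it is an illustration only, not the actual ExploringCompactionPolicy, and the 1.2 ratio and the window handling are assumptions.

```java
import java.util.ArrayList;
import java.util.List;

/**
 * Illustrative sketch only: picks a contiguous window of store-file sizes in
 * which every file is no larger than RATIO times the combined size of the
 * other files in the window, preferring larger windows. The real
 * ExploringCompactionPolicy explores permutations and applies more
 * constraints; RATIO = 1.2 is an assumed value.
 */
public class RatioSelectionSketch {
  private static final double RATIO = 1.2;

  static List<Long> select(List<Long> sizes, int minFiles) {
    for (int start = 0; start + minFiles <= sizes.size(); start++) {
      for (int end = sizes.size(); end - start >= minFiles; end--) {
        List<Long> window = sizes.subList(start, end);
        if (withinRatio(window)) {
          return new ArrayList<>(window);
        }
      }
    }
    return List.of();
  }

  private static boolean withinRatio(List<Long> window) {
    long total = window.stream().mapToLong(Long::longValue).sum();
    // every file must be <= RATIO * (combined size of the remaining files)
    return window.stream().allMatch(s -> s <= RATIO * (total - s));
  }

  public static void main(String[] args) {
    // Sizes of the three files named in the log (a66ff0ec…, 2a895e5f…, 5c2aa65b…).
    List<Long> sizes = List.of(17_994L, 6_027L, 11_421L);
    System.out.println(select(sizes, 3)); // the window sums to 35442 bytes, as logged
  }
}
```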
2024-11-19T08:47:05,914 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ffe88a4e91dad7fbc582782eb91f58fc: 2024-11-19T08:47:05,914 INFO [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc., storeName=ffe88a4e91dad7fbc582782eb91f58fc/info, priority=13, startTime=1732006025865; duration=0sec 2024-11-19T08:47:05,914 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-19T08:47:05,914 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:47:05,914 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/b51e027fc2bc4da18e6b4b8961d7137f because midkey is the same as first or last row 2024-11-19T08:47:05,914 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-19T08:47:05,915 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:47:05,915 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/b51e027fc2bc4da18e6b4b8961d7137f because midkey is the same as first or last row 2024-11-19T08:47:05,915 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-19T08:47:05,915 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:47:05,915 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/b51e027fc2bc4da18e6b4b8961d7137f because midkey is the same as first or last row 2024-11-19T08:47:05,915 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:47:05,915 DEBUG [RS:0;3ab37fa97a98:46031-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ffe88a4e91dad7fbc582782eb91f58fc:info 2024-11-19T08:47:05,915 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:05,916 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-19T08:47:06,048 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T08:47:06,049 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T08:47:06,049 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:47:06,049 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:06,049 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:06,049 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T08:47:06,050 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T08:47:06,050 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=962798992, stopped=false 2024-11-19T08:47:06,050 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3ab37fa97a98,41505,1732005986486 2024-11-19T08:47:06,104 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
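The shutdown that drives the remainder of this log is the JUnit path visible in the call stack above (AbstractTestLogRolling.tearDown → HBaseTestingUtil.shutdownMiniCluster). In outline it is no more than the following sketch; the TEST_UTIL field name and how it is wired up are assumptions, not taken from the test source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public abstract class LogRollingTearDownSketch {
  // Assumed to be created and started by the concrete test's setup method.
  protected static HBaseTestingUtil TEST_UTIL;

  @After
  public void tearDown() throws Exception {
    // As in the call stack above: closes the shared async connection and
    // stops the HBase minicluster together with its backing test services.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```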
2024-11-19T08:47:06,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:06,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:06,132 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:06,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:06,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:06,132 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:06,132 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:47:06,132 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T08:47:06,132 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:47:06,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:06,133 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:06,133 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,46031,1732005986641' ***** 2024-11-19T08:47:06,133 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:47:06,133 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,36037,1732005987998' ***** 2024-11-19T08:47:06,133 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:47:06,133 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:06,133 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:47:06,133 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:47:06,133 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:06,133 INFO [RS:1;3ab37fa97a98:36037 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-19T08:47:06,133 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:47:06,133 INFO [RS:0;3ab37fa97a98:46031 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T08:47:06,133 INFO [RS:0;3ab37fa97a98:46031 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:47:06,133 INFO [RS:1;3ab37fa97a98:36037 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:47:06,133 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:47:06,133 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,36037,1732005987998 2024-11-19T08:47:06,133 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(3091): Received CLOSE for ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:47:06,133 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:47:06,133 INFO [RS:1;3ab37fa97a98:36037 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3ab37fa97a98:36037. 2024-11-19T08:47:06,133 DEBUG [RS:1;3ab37fa97a98:36037 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:47:06,133 DEBUG [RS:1;3ab37fa97a98:36037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:06,134 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,36037,1732005987998; all regions closed. 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,46031,1732005986641 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3ab37fa97a98:46031. 
2024-11-19T08:47:06,134 DEBUG [RS:0;3ab37fa97a98:46031 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:47:06,134 DEBUG [RS:0;3ab37fa97a98:46031 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:06,134 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ffe88a4e91dad7fbc582782eb91f58fc, disabling compactions & flushes 2024-11-19T08:47:06,134 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T08:47:06,134 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T08:47:06,134 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. after waiting 0 ms 2024-11-19T08:47:06,134 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 
2024-11-19T08:47:06,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T08:47:06,134 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,134 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ffe88a4e91dad7fbc582782eb91f58fc 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-19T08:47:06,134 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,134 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T08:47:06,134 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1325): Online Regions={ffe88a4e91dad7fbc582782eb91f58fc=TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T08:47:06,134 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,134 DEBUG [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ffe88a4e91dad7fbc582782eb91f58fc 2024-11-19T08:47:06,134 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,135 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:47:06,135 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:47:06,135 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:47:06,135 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:47:06,135 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:47:06,135 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-19T08:47:06,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:06,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:06,135 ERROR [FSHLog-0-hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882-prefix:3ab37fa97a98,46031,1732005986641.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:06,135 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 2024-11-19T08:47:06,135 WARN [FSHLog-0-hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882-prefix:3ab37fa97a98,46031,1732005986641.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:06,135 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C46031%2C1732005986641.meta:.meta(num 1732005987841) roll requested 2024-11-19T08:47:06,136 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46031%2C1732005986641.meta.1732006026136.meta 2024-11-19T08:47:06,136 WARN [IPC Server handler 0 on default port 36955 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 has not been closed. Lease recovery is in progress. 
RecoveryId = 1071 for block blk_1073741837_1013 2024-11-19T08:47:06,136 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 after 1ms 2024-11-19T08:47:06,139 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:06,139 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:45874 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741887_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741887_1072 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:06,139 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 
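The WAL file the roller abandoned is still leased, so the NameNode starts lease recovery for it (RecoveryId = 1071) and RecoverLeaseFSUtils logs a failed first attempt after 1 ms. From the client side the operation boils down to polling DistributedFileSystem.recoverLease until it returns true; the sketch below is a simplified stand-in for RecoverLeaseFSUtils, and the one-second pacing and timeout are assumptions. recoverLease returns false while recovery is still in progress, which is why a single failed attempt, as logged here, is not fatal.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /**
   * Repeatedly asks the NameNode to recover the lease on a file until the
   * file is closed or the deadline passes. recoverLease() returns true once
   * the file has been closed and its last block length is finalized.
   */
  static boolean recoverLease(DistributedFileSystem dfs, Path file, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(file)) {
        return true;
      }
      Thread.sleep(1_000L); // assumed pacing between attempts
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(args[0]); // assumed to be an hdfs:// path
    try (DistributedFileSystem dfs = (DistributedFileSystem) file.getFileSystem(conf)) {
      System.out.println(recoverLease(dfs, file, 60_000L));
    }
  }
}
```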
2024-11-19T08:47:06,139 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741887_1072 2024-11-19T08:47:06,139 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:45874 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741887_1072] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T08:47:06,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:45874 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741887_1072] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45874 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:06,140 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:06,141 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/fa65ec4d71f946878cbe11ed21e3710e is 1080, key is row0018/info:/1732006025834/Put/seqid=0 2024-11-19T08:47:06,147 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:06,147 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:44404 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741889_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data4]'}, localName='127.0.0.1:44377', datanodeUuid='f0aa2952-855c-402a-84ee-54458d718d25', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741889_1074 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:06,147 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741889_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44377,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:47:06,147 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741889_1074 2024-11-19T08:47:06,147 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:44404 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741889_1074] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:47:06,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1901702886_22 at /127.0.0.1:44404 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741889_1074] {}] datanode.DataXceiver(331): 127.0.0.1:44377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44404 dst: /127.0.0.1:44377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:06,148 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:06,150 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,150 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,150 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,150 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,150 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,150 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732006026136.meta 2024-11-19T08:47:06,151 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:06,151 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46153,DS-e4ec5e9e-650a-43c4-978d-2d8429898426,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
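At this point the meta WAL has been rolled onto the two surviving datanodes (new pipeline 42595 and 32841) even though the old writer still cannot be closed. The roll in this log is triggered automatically by the append failure; for completeness, the same operation can also be requested through the public Admin API, as in this sketch (the connection settings and the explicit rollWALWriter call are illustrative, not what the test does).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server name format matches the log: host,port,startcode.
    ServerName rs = ServerName.valueOf("3ab37fa97a98,46031,1732005986641");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.rollWALWriter(rs); // ask this region server to roll its WAL writer(s)
    }
  }
}
```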
2024-11-19T08:47:06,151 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta 2024-11-19T08:47:06,152 WARN [IPC Server handler 3 on default port 36955 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741834_1010 2024-11-19T08:47:06,152 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta after 1ms 2024-11-19T08:47:06,152 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42595:42595),(127.0.0.1/127.0.0.1:32841:32841)] 2024-11-19T08:47:06,152 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta is not closed yet, will try archiving it next time 2024-11-19T08:47:06,156 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T08:47:06,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741890_1075 (size=11421) 2024-11-19T08:47:06,157 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T08:47:06,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741890_1075 (size=11421) 2024-11-19T08:47:06,158 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/fa65ec4d71f946878cbe11ed21e3710e 2024-11-19T08:47:06,165 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/.tmp/info/fa65ec4d71f946878cbe11ed21e3710e as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/fa65ec4d71f946878cbe11ed21e3710e 2024-11-19T08:47:06,169 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/info/5be939e71c9e45929aaed3b0eb5487c8 is 203, key is 
TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc./info:regioninfo/1732005988515/Put/seqid=0 2024-11-19T08:47:06,171 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/fa65ec4d71f946878cbe11ed21e3710e, entries=6, sequenceid=65, filesize=11.2 K 2024-11-19T08:47:06,173 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ffe88a4e91dad7fbc582782eb91f58fc in 38ms, sequenceid=65, compaction requested=false 2024-11-19T08:47:06,173 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/cd6a7701162b494d946f5e5b1a27b1af, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/57930dcce2b14645976e2b5f6cd244e1, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/2a895e5f2cc0412bb81c26d8bde9d70f, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/5c2aa65b493b408992b19f8cc4a2542b] to archive 2024-11-19T08:47:06,175 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
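Closing the store then moves every compacted-away HFile from the region's info directory to the mirrored path under archive/, as the entries that follow show. The move is essentially an HDFS rename; the sketch below is a reduced stand-in for HFileArchiver (path construction and error handling are simplified, and it assumes the store file sits under the given root directory).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ArchiveStoreFileSketch {
  /**
   * Moves a store file from <root>/data/<ns>/<table>/<region>/<family>/<file>
   * to the mirrored location under <root>/archive/, creating parent
   * directories as needed. The real HFileArchiver also handles name
   * collisions and retries; this is only the basic rename.
   */
  static boolean archive(FileSystem fs, Path rootDir, Path storeFile) throws Exception {
    // e.g. data/default/T/<region>/info/<file> -> archive/data/default/T/<region>/info/<file>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    Path target = new Path(new Path(rootDir, "archive"), relative);
    fs.mkdirs(target.getParent());
    return fs.rename(storeFile, target);
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(archive(fs, new Path(args[0]), new Path(args[1])));
  }
}
```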
2024-11-19T08:47:06,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741891_1077 (size=7089) 2024-11-19T08:47:06,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741891_1077 (size=7089) 2024-11-19T08:47:06,176 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/info/5be939e71c9e45929aaed3b0eb5487c8 2024-11-19T08:47:06,177 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/cd6a7701162b494d946f5e5b1a27b1af to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/cd6a7701162b494d946f5e5b1a27b1af 2024-11-19T08:47:06,178 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/4cb4d7c0c4ae41bfb2049fab45e72f8b 2024-11-19T08:47:06,179 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/a66ff0ec34864cfb83489f663f522490 2024-11-19T08:47:06,181 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/57930dcce2b14645976e2b5f6cd244e1 to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/57930dcce2b14645976e2b5f6cd244e1 2024-11-19T08:47:06,182 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/2a895e5f2cc0412bb81c26d8bde9d70f to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/2a895e5f2cc0412bb81c26d8bde9d70f 2024-11-19T08:47:06,184 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/5c2aa65b493b408992b19f8cc4a2542b to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/info/5c2aa65b493b408992b19f8cc4a2542b 2024-11-19T08:47:06,184 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3ab37fa97a98:41505 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T08:47:06,185 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cd6a7701162b494d946f5e5b1a27b1af=10347, 4cb4d7c0c4ae41bfb2049fab45e72f8b=12506, a66ff0ec34864cfb83489f663f522490=17994, 57930dcce2b14645976e2b5f6cd244e1=6027, 2a895e5f2cc0412bb81c26d8bde9d70f=6027, 5c2aa65b493b408992b19f8cc4a2542b=11421] 2024-11-19T08:47:06,189 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffe88a4e91dad7fbc582782eb91f58fc/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-19T08:47:06,190 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 2024-11-19T08:47:06,190 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ffe88a4e91dad7fbc582782eb91f58fc: Waiting for close lock at 1732006026134Running coprocessor pre-close hooks at 1732006026134Disabling compacts and flushes for region at 1732006026134Disabling writes for close at 1732006026134Obtaining lock to block concurrent updates at 1732006026134Preparing flush snapshotting stores in ffe88a4e91dad7fbc582782eb91f58fc at 1732006026134Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1732006026135 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. at 1732006026135Flushing ffe88a4e91dad7fbc582782eb91f58fc/info: creating writer at 1732006026135Flushing ffe88a4e91dad7fbc582782eb91f58fc/info: appending metadata at 1732006026140 (+5 ms)Flushing ffe88a4e91dad7fbc582782eb91f58fc/info: closing flushed file at 1732006026140Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7807479f: reopening flushed file at 1732006026164 (+24 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ffe88a4e91dad7fbc582782eb91f58fc in 38ms, sequenceid=65, compaction requested=false at 1732006026173 (+9 ms)Writing region close event to WAL at 1732006026185 (+12 ms)Running coprocessor post-close hooks at 1732006026190 (+5 ms)Closed at 1732006026190 2024-11-19T08:47:06,191 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732005988135.ffe88a4e91dad7fbc582782eb91f58fc. 
2024-11-19T08:47:06,198 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/ns/5bff6681f3aa4aec803d760ef322bf38 is 43, key is default/ns:d/1732005987948/Put/seqid=0 2024-11-19T08:47:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741892_1078 (size=5153) 2024-11-19T08:47:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741892_1078 (size=5153) 2024-11-19T08:47:06,203 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/ns/5bff6681f3aa4aec803d760ef322bf38 2024-11-19T08:47:06,224 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.1732006007886 to hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs/3ab37fa97a98%2C46031%2C1732005986641.1732006007886 2024-11-19T08:47:06,228 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/table/6f99fc868a9f4760b9d75dfaf7d581f8 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732005988526/Put/seqid=0 2024-11-19T08:47:06,230 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:06,230 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:44377,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 
2024-11-19T08:47:06,230 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741893_1079 2024-11-19T08:47:06,231 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:06,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741894_1080 (size=5424) 2024-11-19T08:47:06,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741894_1080 (size=5424) 2024-11-19T08:47:06,237 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/table/6f99fc868a9f4760b9d75dfaf7d581f8 2024-11-19T08:47:06,243 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/info/5be939e71c9e45929aaed3b0eb5487c8 as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/info/5be939e71c9e45929aaed3b0eb5487c8 2024-11-19T08:47:06,250 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/info/5be939e71c9e45929aaed3b0eb5487c8, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T08:47:06,251 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/ns/5bff6681f3aa4aec803d760ef322bf38 as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/ns/5bff6681f3aa4aec803d760ef322bf38 2024-11-19T08:47:06,258 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/ns/5bff6681f3aa4aec803d760ef322bf38, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T08:47:06,259 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/.tmp/table/6f99fc868a9f4760b9d75dfaf7d581f8 as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/table/6f99fc868a9f4760b9d75dfaf7d581f8 2024-11-19T08:47:06,264 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/table/6f99fc868a9f4760b9d75dfaf7d581f8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T08:47:06,266 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-11-19T08:47:06,271 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T08:47:06,272 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:47:06,272 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:47:06,272 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006026134Running coprocessor pre-close hooks at 1732006026134Disabling compacts and flushes for region at 1732006026134Disabling writes for close at 1732006026135 (+1 ms)Obtaining lock to block concurrent updates at 1732006026135Preparing flush snapshotting stores in 1588230740 at 1732006026135Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732006026135Flushing stores of hbase:meta,,1.1588230740 at 1732006026153 (+18 ms)Flushing 1588230740/info: creating writer at 1732006026153Flushing 1588230740/info: appending metadata at 1732006026169 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732006026169Flushing 1588230740/ns: creating writer at 1732006026183 (+14 ms)Flushing 1588230740/ns: appending metadata at 1732006026198 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732006026198Flushing 1588230740/table: creating writer at 1732006026209 (+11 ms)Flushing 1588230740/table: appending metadata at 1732006026228 (+19 ms)Flushing 1588230740/table: closing flushed file at 1732006026228Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@750a100c: reopening flushed file at 1732006026242 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f74c8a9: reopening flushed file at 1732006026250 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68c7618e: reopening flushed file at 1732006026258 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1732006026266 (+8 ms)Writing region close event to WAL at 1732006026267 (+1 ms)Running coprocessor post-close hooks at 1732006026272 (+5 ms)Closed at 1732006026272 2024-11-19T08:47:06,272 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T08:47:06,335 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,46031,1732005986641; all regions closed. 
2024-11-19T08:47:06,335 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,335 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,336 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,336 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,336 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:06,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741888_1073 (size=825) 2024-11-19T08:47:06,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741888_1073 (size=825) 2024-11-19T08:47:07,100 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T08:47:07,100 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T08:47:07,157 INFO [regionserver/3ab37fa97a98:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:47:07,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741862_1045 (size=13591) 2024-11-19T08:47:07,502 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@673608d0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44377, datanodeUuid=f0aa2952-855c-402a-84ee-54458d718d25, infoPort=42595, infoSecurePort=0, ipcPort=33431, storageInfo=lv=-57;cid=testClusterID;nsid=2037179163;c=1732005984737):Failed to transfer BP-571785619-172.17.0.2-1732005984737:blk_1073741825_1001 to 127.0.0.1:43925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:07,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:47:08,100 INFO [regionserver/3ab37fa97a98:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:47:08,270 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T08:47:08,270 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-19T08:47:08,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741836_1012 (size=76) 2024-11-19T08:47:08,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:47:10,138 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 after 4003ms 2024-11-19T08:47:10,154 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta after 4003ms 2024-11-19T08:47:10,521 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1fc3c43a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-571785619-172.17.0.2-1732005984737:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:46153,null,null]) java.net.ConnectException: Call From 3ab37fa97a98/172.17.0.2 to localhost:35143 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T08:47:11,135 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T08:47:11,138 DEBUG [RS:1;3ab37fa97a98:36037 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs 2024-11-19T08:47:11,138 INFO [RS:1;3ab37fa97a98:36037 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C36037%2C1732005987998:(num 1732005988237) 2024-11-19T08:47:11,138 DEBUG [RS:1;3ab37fa97a98:36037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:11,138 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:47:11,138 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:47:11,138 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T08:47:11,139 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T08:47:11,139 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:47:11,139 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:47:11,139 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T08:47:11,139 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:47:11,139 INFO [RS:1;3ab37fa97a98:36037 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36037 2024-11-19T08:47:11,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:11,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,36037,1732005987998 2024-11-19T08:47:11,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:47:11,198 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:47:11,199 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,36037,1732005987998] 2024-11-19T08:47:11,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,214 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,36037,1732005987998 already deleted, retry=false 2024-11-19T08:47:11,214 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,36037,1732005987998 expired; onlineServers=1 2024-11-19T08:47:11,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:11,306 INFO [RS:1;3ab37fa97a98:36037 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:47:11,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36037-0x101538f93680002, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:11,306 INFO [RS:1;3ab37fa97a98:36037 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,36037,1732005987998; zookeeper connection closed. 
2024-11-19T08:47:11,307 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2af6090d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2af6090d 2024-11-19T08:47:11,336 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T08:47:11,339 DEBUG [RS:0;3ab37fa97a98:46031 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs 2024-11-19T08:47:11,339 INFO [RS:0;3ab37fa97a98:46031 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C46031%2C1732005986641.meta:.meta(num 1732006026136) 2024-11-19T08:47:11,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,340 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741882_1066 (size=15140) 2024-11-19T08:47:11,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741882_1066 (size=15140) 2024-11-19T08:47:11,344 DEBUG [RS:0;3ab37fa97a98:46031 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/oldWALs 2024-11-19T08:47:11,344 INFO [RS:0;3ab37fa97a98:46031 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C46031%2C1732005986641:(num 1732006025814) 2024-11-19T08:47:11,344 DEBUG [RS:0;3ab37fa97a98:46031 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:11,344 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:47:11,345 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:47:11,345 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T08:47:11,345 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:47:11,345 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T08:47:11,345 INFO [RS:0;3ab37fa97a98:46031 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46031 2024-11-19T08:47:11,379 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,46031,1732005986641 2024-11-19T08:47:11,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:47:11,379 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:47:11,379 ERROR [pool-302-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$363/0x00007f5c44902658@10acd192 rejected from java.util.concurrent.ThreadPoolExecutor@55ae5de3[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 13] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-19T08:47:11,389 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,46031,1732005986641] 2024-11-19T08:47:11,398 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,46031,1732005986641 already deleted, retry=false 2024-11-19T08:47:11,398 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,46031,1732005986641 expired; onlineServers=0 2024-11-19T08:47:11,398 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3ab37fa97a98,41505,1732005986486' ***** 2024-11-19T08:47:11,398 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T08:47:11,398 INFO [M:0;3ab37fa97a98:41505 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:47:11,398 INFO [M:0;3ab37fa97a98:41505 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:47:11,398 DEBUG [M:0;3ab37fa97a98:41505 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T08:47:11,398 DEBUG [M:0;3ab37fa97a98:41505 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T08:47:11,398 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T08:47:11,398 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005986990 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732005986990,5,FailOnTimeoutGroup] 2024-11-19T08:47:11,398 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005986990 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732005986990,5,FailOnTimeoutGroup] 2024-11-19T08:47:11,398 INFO [M:0;3ab37fa97a98:41505 {}] hbase.ChoreService(370): Chore service for: master/3ab37fa97a98:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T08:47:11,399 INFO [M:0;3ab37fa97a98:41505 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:47:11,399 DEBUG [M:0;3ab37fa97a98:41505 {}] master.HMaster(1795): Stopping service threads 2024-11-19T08:47:11,399 INFO [M:0;3ab37fa97a98:41505 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T08:47:11,399 INFO [M:0;3ab37fa97a98:41505 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:47:11,399 INFO [M:0;3ab37fa97a98:41505 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T08:47:11,399 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T08:47:11,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T08:47:11,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:11,406 DEBUG [M:0;3ab37fa97a98:41505 {}] zookeeper.ZKUtil(347): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T08:47:11,406 WARN [M:0;3ab37fa97a98:41505 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T08:47:11,407 INFO [M:0;3ab37fa97a98:41505 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/.lastflushedseqids 2024-11-19T08:47:11,410 WARN [Thread-1075 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1081 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:11,410 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:45950 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741895_1081] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741895_1081 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:11,410 WARN [Thread-1075 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741895_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:47:11,410 WARN [Thread-1075 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741895_1081 2024-11-19T08:47:11,410 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:45950 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741895_1081] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:47:11,410 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:45950 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741895_1081] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45950 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:11,411 WARN [Thread-1075 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:11,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741896_1082 (size=130) 2024-11-19T08:47:11,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741896_1082 (size=130) 2024-11-19T08:47:11,419 INFO [M:0;3ab37fa97a98:41505 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T08:47:11,419 INFO [M:0;3ab37fa97a98:41505 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T08:47:11,419 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:47:11,419 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:11,419 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:11,419 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:47:11,419 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:11,420 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-19T08:47:11,435 DEBUG [M:0;3ab37fa97a98:41505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1e5060fba14c45d4b7768b31b81d4007 is 82, key is hbase:meta,,1/info:regioninfo/1732005987869/Put/seqid=0 2024-11-19T08:47:11,438 WARN [Thread-1082 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:11,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:45962 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741897_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6]'}, localName='127.0.0.1:33279', datanodeUuid='c2c85126-7588-4581-b1ef-3cf98a00816f', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741897_1083 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:11,438 WARN [Thread-1082 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:47:11,438 WARN [Thread-1082 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741897_1083 2024-11-19T08:47:11,438 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:45962 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741897_1083] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:47:11,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:45962 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741897_1083] {}] datanode.DataXceiver(331): 127.0.0.1:33279:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45962 dst: /127.0.0.1:33279 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:11,439 WARN [Thread-1082 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:11,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741898_1084 (size=5672) 2024-11-19T08:47:11,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741898_1084 (size=5672) 2024-11-19T08:47:11,443 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1e5060fba14c45d4b7768b31b81d4007 2024-11-19T08:47:11,465 DEBUG [M:0;3ab37fa97a98:41505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8cd8b8c43261412bb3681d4238acb38a is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732005988533/Put/seqid=0 2024-11-19T08:47:11,468 WARN [Thread-1090 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:11,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:44496 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data4]'}, localName='127.0.0.1:44377', datanodeUuid='f0aa2952-855c-402a-84ee-54458d718d25', xmitsInProgress=0}:Exception transferring block BP-571785619-172.17.0.2-1732005984737:blk_1073741899_1085 to mirror 127.0.0.1:43925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:11,468 WARN [Thread-1090 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44377,DS-af2ac2d2-d5bd-42f8-adc7-234974203b47,DISK], DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 2024-11-19T08:47:11,468 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:44496 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T08:47:11,468 WARN [Thread-1090 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741899_1085 2024-11-19T08:47:11,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1518076557_22 at /127.0.0.1:44496 [Receiving block BP-571785619-172.17.0.2-1732005984737:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:44377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44496 dst: /127.0.0.1:44377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:11,469 WARN [Thread-1090 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:11,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741900_1086 (size=6256) 2024-11-19T08:47:11,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741900_1086 (size=6256) 2024-11-19T08:47:11,481 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8cd8b8c43261412bb3681d4238acb38a 2024-11-19T08:47:11,487 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8cd8b8c43261412bb3681d4238acb38a 2024-11-19T08:47:11,490 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:11,490 INFO [RS:0;3ab37fa97a98:46031 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:47:11,490 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46031-0x101538f93680001, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:11,490 INFO [RS:0;3ab37fa97a98:46031 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,46031,1732005986641; zookeeper connection closed. 
2024-11-19T08:47:11,490 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a231aa3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a231aa3 2024-11-19T08:47:11,490 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-19T08:47:11,503 DEBUG [M:0;3ab37fa97a98:41505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a53e86dbe9647319cac056dcb51cf6f is 69, key is 3ab37fa97a98,36037,1732005987998/rs:state/1732005988074/Put/seqid=0 2024-11-19T08:47:11,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741901_1087 (size=5224) 2024-11-19T08:47:11,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741901_1087 (size=5224) 2024-11-19T08:47:11,509 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a53e86dbe9647319cac056dcb51cf6f 2024-11-19T08:47:11,535 DEBUG [M:0;3ab37fa97a98:41505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e65b685c6f4b4fa69e2378202f36eafb is 52, key is load_balancer_on/state:d/1732005987977/Put/seqid=0 2024-11-19T08:47:11,537 WARN [Thread-1103 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:11,537 WARN [Thread-1103 {}] hdfs.DataStreamer(1731): Error Recovery for BP-571785619-172.17.0.2-1732005984737:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK], DatanodeInfoWithStorage[127.0.0.1:33279,DS-5b40f9bd-ac3a-4c58-957f-556aeeb3729e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK]) is bad. 
2024-11-19T08:47:11,537 WARN [Thread-1103 {}] hdfs.DataStreamer(1850): Abandoning BP-571785619-172.17.0.2-1732005984737:blk_1073741902_1088 2024-11-19T08:47:11,537 WARN [Thread-1103 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43925,DS-0662beeb-0540-4986-abea-05906b0a61f8,DISK] 2024-11-19T08:47:11,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741903_1089 (size=5056) 2024-11-19T08:47:11,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741903_1089 (size=5056) 2024-11-19T08:47:11,543 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e65b685c6f4b4fa69e2378202f36eafb 2024-11-19T08:47:11,550 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1e5060fba14c45d4b7768b31b81d4007 as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1e5060fba14c45d4b7768b31b81d4007 2024-11-19T08:47:11,555 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1e5060fba14c45d4b7768b31b81d4007, entries=8, sequenceid=60, filesize=5.5 K 2024-11-19T08:47:11,556 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8cd8b8c43261412bb3681d4238acb38a as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8cd8b8c43261412bb3681d4238acb38a 2024-11-19T08:47:11,562 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8cd8b8c43261412bb3681d4238acb38a 2024-11-19T08:47:11,562 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8cd8b8c43261412bb3681d4238acb38a, entries=6, sequenceid=60, filesize=6.1 K 2024-11-19T08:47:11,563 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a53e86dbe9647319cac056dcb51cf6f as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a53e86dbe9647319cac056dcb51cf6f 2024-11-19T08:47:11,568 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a53e86dbe9647319cac056dcb51cf6f, entries=2, sequenceid=60, filesize=5.1 K 2024-11-19T08:47:11,569 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e65b685c6f4b4fa69e2378202f36eafb as hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e65b685c6f4b4fa69e2378202f36eafb 2024-11-19T08:47:11,573 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e65b685c6f4b4fa69e2378202f36eafb, entries=1, sequenceid=60, filesize=4.9 K 2024-11-19T08:47:11,575 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=60, compaction requested=false 2024-11-19T08:47:11,576 INFO [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:11,576 DEBUG [M:0;3ab37fa97a98:41505 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006031419Disabling compacts and flushes for region at 1732006031419Disabling writes for close at 1732006031419Obtaining lock to block concurrent updates at 1732006031420 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732006031420Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732006031420Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732006031421 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732006031421Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732006031435 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732006031435Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732006031448 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732006031464 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732006031464Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732006031487 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732006031503 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732006031503Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732006031515 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732006031534 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732006031534Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e7420d8: reopening flushed file at 1732006031549 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@466177d2: reopening flushed file at 1732006031555 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@712c50aa: reopening flushed file at 1732006031562 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59d6a360: reopening flushed file at 1732006031568 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=60, compaction requested=false at 1732006031575 (+7 ms)Writing region close event to WAL at 1732006031576 (+1 ms)Closed at 1732006031576 2024-11-19T08:47:11,576 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,577 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,577 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,577 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,577 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:11,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44377 is added to blk_1073741881_1064 (size=1045) 2024-11-19T08:47:11,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33279 is added to blk_1073741881_1064 (size=1045) 2024-11-19T08:47:11,579 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:47:11,580 INFO [M:0;3ab37fa97a98:41505 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T08:47:11,580 INFO [M:0;3ab37fa97a98:41505 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41505 2024-11-19T08:47:11,580 INFO [M:0;3ab37fa97a98:41505 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:47:11,723 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:47:11,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:11,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:11,763 INFO [M:0;3ab37fa97a98:41505 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:47:11,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41505-0x101538f93680000, quorum=127.0.0.1:52018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:11,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7eb80544{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:11,766 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@216ea3a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:11,766 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:11,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23e1cdda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:11,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@658ca166{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:11,767 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:47:11,767 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:11,767 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:11,767 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571785619-172.17.0.2-1732005984737 (Datanode Uuid f0aa2952-855c-402a-84ee-54458d718d25) service to localhost/127.0.0.1:36955 2024-11-19T08:47:11,767 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@359f063f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46153,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35143 , LocalHost:localPort 3ab37fa97a98/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T08:47:11,767 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@359f063f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:44377,null,null]) java.io.IOException: No block pool offer service for bpid=BP-571785619-172.17.0.2-1732005984737 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:11,768 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@359f063f {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:46153,null,null], DatanodeInfoWithStorage[127.0.0.1:44377,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-571785619-172.17.0.2-1732005984737:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:46153,null,null], DatanodeInfoWithStorage[127.0.0.1:44377,null,null]] 2024-11-19T08:47:11,768 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@359f063f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46153,null,null]) java.io.IOException: No block pool offer service for bpid=BP-571785619-172.17.0.2-1732005984737 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:11,768 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@359f063f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44377,null,null]) java.io.IOException: No block pool offer service for bpid=BP-571785619-172.17.0.2-1732005984737 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:11,768 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@359f063f {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:46153,null,null], DatanodeInfoWithStorage[127.0.0.1:44377,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-571785619-172.17.0.2-1732005984737:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:46153,null,null], DatanodeInfoWithStorage[127.0.0.1:44377,null,null]] 2024-11-19T08:47:11,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data3/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:11,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data4/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:11,769 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:11,774 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21371268{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:11,775 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@138afca9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:11,775 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:11,775 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13bc47a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:11,775 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c68a5ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:11,776 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:11,776 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:47:11,776 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:11,776 WARN [BP-571785619-172.17.0.2-1732005984737 heartbeating to localhost/127.0.0.1:36955 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-571785619-172.17.0.2-1732005984737 (Datanode Uuid c2c85126-7588-4581-b1ef-3cf98a00816f) service to localhost/127.0.0.1:36955 2024-11-19T08:47:11,777 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data5/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:11,777 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/cluster_54eadf65-754b-cd42-7421-46bee40ded88/data/data6/current/BP-571785619-172.17.0.2-1732005984737 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:11,777 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:11,782 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e477d02{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:47:11,782 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@789b23ca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:11,782 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:11,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78100011{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:11,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@342817d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:11,790 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T08:47:11,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T08:47:11,827 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 79) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36955 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:42561 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:36955 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:36955 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36955 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42561 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f5c44befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f5c44befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36955 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36955 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36955 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=185 (was 287), ProcessCount=11 (was 11), AvailableMemoryMB=5717 (was 6418) 2024-11-19T08:47:11,833 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=185, ProcessCount=11, AvailableMemoryMB=5717 2024-11-19T08:47:11,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.log.dir so I do NOT create it in target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d944403e-8064-ccf7-a0d0-302593ff098c/hadoop.tmp.dir so I do NOT create it in target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b, deleteOnExit=true 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/test.cache.data in system properties and HBase conf 
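The thread dump and "Thread LEAK?" / ResourceChecker summary above flag Netty event-loop, IPC, and WAL-closer threads that are still alive after the previous test finished. A minimal sketch of that kind of before/after leak check, using only standard JDK APIs (illustrative only, not HBase's ResourceChecker implementation):

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

// Illustrative before/after thread-leak check in the spirit of the
// "Potentially hanging thread" dump above; not the HBase ResourceChecker itself.
public final class ThreadLeakCheck {

  // Snapshot the names of all live threads before the test runs.
  public static Set<String> snapshot() {
    return Thread.getAllStackTraces().keySet().stream()
        .map(Thread::getName)
        .collect(Collectors.toSet());
  }

  // After the test, report any thread that is alive now but absent from the
  // "before" snapshot, together with its current stack.
  public static void reportNewThreads(Set<String> before) {
    for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
      Thread t = e.getKey();
      if (t.isAlive() && !before.contains(t.getName())) {
        System.out.println("Potentially hanging thread: " + t.getName());
        for (StackTraceElement frame : e.getValue()) {
          System.out.println("    " + frame);
        }
      }
    }
  }
}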
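Just above, testLogRollOnPipelineRestart starts standing up a fresh minicluster (one master, one region server, two datanodes, one ZooKeeper server); the "Setting ... in system properties and HBase conf" lines that continue below stage the per-test directories for it. A hedged sketch of how such a cluster is typically started and torn down from test code; the class and builder names mirror what the log prints (HBaseTestingUtil, StartMiniClusterOption), but the exact signatures should be treated as assumptions against the HBase version in use:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Hedged sketch: start a minicluster matching the options logged above
// (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1).
// Builder/method names are assumed from the log output, not verified here.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, HMaster and the region server
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster();    // releases the threads the ResourceChecker watches for
    }
  }
}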
2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir in system properties and HBase conf 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T08:47:11,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T08:47:11,835 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T08:47:11,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/nfs.dump.dir in system properties and HBase conf 2024-11-19T08:47:11,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir in system properties and HBase conf 2024-11-19T08:47:11,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:47:11,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T08:47:11,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T08:47:11,847 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:47:12,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:12,152 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:12,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:12,157 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:12,158 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:12,158 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:12,158 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:47:12,159 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:12,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a7a3a77{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:12,160 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@515d5cec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:12,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a083e6f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir/jetty-localhost-45377-hadoop-hdfs-3_4_1-tests_jar-_-any-3642314288000908978/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:47:12,252 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@235bcdaa{HTTP/1.1, (http/1.1)}{localhost:45377} 2024-11-19T08:47:12,252 INFO [Time-limited test {}] server.Server(415): Started @156779ms 2024-11-19T08:47:12,263 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:47:12,473 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:12,476 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:12,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:12,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:12,477 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:47:12,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@675d0ec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:12,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5260e8b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:12,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T08:47:12,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:47:12,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:47:12,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T08:47:12,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@667c15a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir/jetty-localhost-46761-hadoop-hdfs-3_4_1-tests_jar-_-any-8668299762921301744/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:12,570 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@633966fa{HTTP/1.1, (http/1.1)}{localhost:46761} 2024-11-19T08:47:12,571 INFO [Time-limited test {}] server.Server(415): Started @157099ms 2024-11-19T08:47:12,572 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:47:12,595 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:12,599 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:12,600 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:12,600 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:12,600 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:47:12,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@582f5b19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:12,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1afad5ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:12,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ab3aff0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir/jetty-localhost-45283-hadoop-hdfs-3_4_1-tests_jar-_-any-2951824076476757355/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:12,696 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69606aa6{HTTP/1.1, (http/1.1)}{localhost:45283} 2024-11-19T08:47:12,696 INFO [Time-limited test {}] server.Server(415): Started @157224ms 2024-11-19T08:47:12,697 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:47:13,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:13,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:13,292 WARN [Thread-1199 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data1/current/BP-1281909225-172.17.0.2-1732006031857/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:13,292 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data2/current/BP-1281909225-172.17.0.2-1732006031857/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:13,313 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:47:13,315 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4fb7ea2d0a19a23 with lease ID 0x188196281fb58ef5: Processing first storage report for DS-fa53e452-9594-4d74-a5ce-dca1a1394415 from datanode DatanodeRegistration(127.0.0.1:44705, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=37891, infoSecurePort=0, ipcPort=39677, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857) 2024-11-19T08:47:13,315 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4fb7ea2d0a19a23 with lease ID 0x188196281fb58ef5: from storage DS-fa53e452-9594-4d74-a5ce-dca1a1394415 node DatanodeRegistration(127.0.0.1:44705, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=37891, infoSecurePort=0, ipcPort=39677, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:13,315 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4fb7ea2d0a19a23 with lease ID 0x188196281fb58ef5: Processing first storage report for DS-6979ebd7-950b-4396-baa6-79da31b047ec from datanode DatanodeRegistration(127.0.0.1:44705, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=37891, infoSecurePort=0, ipcPort=39677, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857) 2024-11-19T08:47:13,315 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4fb7ea2d0a19a23 with lease ID 0x188196281fb58ef5: from storage DS-6979ebd7-950b-4396-baa6-79da31b047ec node DatanodeRegistration(127.0.0.1:44705, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=37891, infoSecurePort=0, ipcPort=39677, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:47:13,428 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data3/current/BP-1281909225-172.17.0.2-1732006031857/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:13,428 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data4/current/BP-1281909225-172.17.0.2-1732006031857/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:13,448 WARN [Thread-1186 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:47:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73c206d05097aab with lease ID 0x188196281fb58ef6: Processing first storage report for DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed from datanode DatanodeRegistration(127.0.0.1:37513, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=41245, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857) 2024-11-19T08:47:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73c206d05097aab with lease ID 0x188196281fb58ef6: from storage DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed node DatanodeRegistration(127.0.0.1:37513, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=41245, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73c206d05097aab with lease ID 0x188196281fb58ef6: Processing first storage report for DS-a8cb0989-22fb-438d-bb95-7e2e60a583ab from datanode DatanodeRegistration(127.0.0.1:37513, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=41245, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857) 2024-11-19T08:47:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73c206d05097aab with lease ID 0x188196281fb58ef6: from storage DS-a8cb0989-22fb-438d-bb95-7e2e60a583ab node DatanodeRegistration(127.0.0.1:37513, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=41245, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:47:13,530 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73 2024-11-19T08:47:13,535 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/zookeeper_0, clientPort=60070, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T08:47:13,535 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60070 2024-11-19T08:47:13,536 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:13,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:13,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:47:13,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:47:13,547 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252 with version=8 2024-11-19T08:47:13,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase-staging 2024-11-19T08:47:13,549 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:47:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:13,549 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:47:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:13,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:47:13,549 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T08:47:13,549 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:47:13,550 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39659 2024-11-19T08:47:13,552 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39659 connecting to ZooKeeper ensemble=127.0.0.1:60070 2024-11-19T08:47:13,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396590x0, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-19T08:47:13,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39659-0x10153904b430000 connected 2024-11-19T08:47:13,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:13,683 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:13,685 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:13,686 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252, hbase.cluster.distributed=false 2024-11-19T08:47:13,688 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:47:13,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39659 2024-11-19T08:47:13,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39659 2024-11-19T08:47:13,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39659 2024-11-19T08:47:13,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39659 2024-11-19T08:47:13,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39659 2024-11-19T08:47:13,709 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:47:13,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:13,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:13,709 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:47:13,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:13,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:47:13,709 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:47:13,709 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:47:13,710 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45945 2024-11-19T08:47:13,711 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45945 connecting to ZooKeeper ensemble=127.0.0.1:60070 2024-11-19T08:47:13,712 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:13,713 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:13,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459450x0, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:47:13,723 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45945-0x10153904b430001 connected 2024-11-19T08:47:13,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:13,723 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:47:13,724 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:47:13,725 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T08:47:13,726 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:47:13,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45945 2024-11-19T08:47:13,727 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45945 2024-11-19T08:47:13,729 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45945 2024-11-19T08:47:13,729 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45945 2024-11-19T08:47:13,729 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45945 2024-11-19T08:47:13,742 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3ab37fa97a98:39659 2024-11-19T08:47:13,742 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:13,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:13,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:13,753 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:13,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T08:47:13,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,765 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:47:13,765 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3ab37fa97a98,39659,1732006033548 from backup master directory 2024-11-19T08:47:13,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:13,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:13,772 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
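The watcher traffic above (Set watcher on /hbase/master, NodeCreated/NodeDeleted/NodeChildrenChanged events against quorum=127.0.0.1:60070) is HBase's ZKWatcher layered over the plain ZooKeeper client. A minimal sketch of that underlying pattern follows; the ensemble address and the 40000 ms session timeout are taken from the log, while the class name, the watched paths, and the sleep are illustrative, and this is not ZKWatcher itself:

    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
        public static void main(String[] args) throws Exception {
            // Connect to the same ensemble the test cluster reports (127.0.0.1:60070),
            // with the maxSessionTimeout seen at the top of this section.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60070", 40000, event -> {
                // Events arrive exactly as logged: type (NodeCreated, NodeDeleted,
                // NodeChildrenChanged, None) plus connection state and path.
                System.out.println("type=" + event.getType()
                    + ", state=" + event.getState()
                    + ", path=" + event.getPath());
            });
            // Passing watch=true registers interest even when the znode does not exist yet,
            // mirroring "Set watcher on znode that does not yet exist, /hbase/master".
            zk.exists("/hbase/master", true);
            zk.exists("/hbase/running", true);
            Thread.sleep(10_000); // keep the session open long enough to observe events
            zk.close();
        }
    }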
2024-11-19T08:47:13,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:13,773 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:13,780 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/hbase.id] with ID: 03fa1691-d517-4258-94b2-77e236eb458e 2024-11-19T08:47:13,780 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/.tmp/hbase.id 2024-11-19T08:47:13,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:47:13,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:47:13,790 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/.tmp/hbase.id]:[hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/hbase.id] 2024-11-19T08:47:13,805 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:13,806 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T08:47:13,807 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
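The cluster ID sequence above — write hbase.id under .tmp, then move it to its target location — is the usual HDFS publish-by-rename pattern: write to a temporary path, then rename so readers never observe a partially written file. A minimal sketch of that pattern with the plain Hadoop FileSystem API; the namenode address and ID value are copied from the log, but the shortened paths and plain-text payload are illustrative (the real FSUtils writes a serialized cluster ID, not raw text):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:36463"); // namenode from the log
            FileSystem fs = FileSystem.get(conf);

            Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // shortened, hypothetical
            Path target = new Path("/user/jenkins/test-data/hbase.id");   // shortened, hypothetical

            // 1. Write the ID under a temporary name ...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("03fa1691-d517-4258-94b2-77e236eb458e".getBytes(StandardCharsets.UTF_8));
            }
            // 2. ... then publish it atomically with a rename.
            if (!fs.rename(tmp, target)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
            }
            fs.close();
        }
    }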
2024-11-19T08:47:13,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:47:13,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:47:13,827 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:47:13,828 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T08:47:13,828 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:47:13,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:47:13,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:47:13,836 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store 2024-11-19T08:47:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:47:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:47:13,844 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:13,844 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:47:13,844 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:13,844 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:13,844 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:47:13,844 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:13,844 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
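The master:store table and its four column families above are created internally by the master, but the logged family attributes (VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER, BLOCKSIZE) map directly onto the public descriptor builders. A sketch expressing the 'info' and 'proc' families from the dump that way; the class name and the trimmed family list are illustrative, and this is not how MasterRegion actually constructs the table:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static void main(String[] args) {
            // 'info' family as logged: 3 versions, in-memory, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, 8 KB blocks.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build();

            // 'proc' family as logged: single version, ROW bloom filter, 64 KB blocks.
            ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build();

            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(info)
                .setColumnFamily(proc)
                .build();
            System.out.println(td);
        }
    }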
2024-11-19T08:47:13,844 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006033844Disabling compacts and flushes for region at 1732006033844Disabling writes for close at 1732006033844Writing region close event to WAL at 1732006033844Closed at 1732006033844 2024-11-19T08:47:13,845 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/.initializing 2024-11-19T08:47:13,845 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:13,847 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C39659%2C1732006033548, suffix=, logDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548, archiveDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/oldWALs, maxLogs=10 2024-11-19T08:47:13,848 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C39659%2C1732006033548.1732006033847 2024-11-19T08:47:13,852 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 2024-11-19T08:47:13,853 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41245:41245),(127.0.0.1/127.0.0.1:37891:37891)] 2024-11-19T08:47:13,853 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:47:13,854 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:13,854 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,854 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T08:47:13,856 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:13,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:13,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T08:47:13,858 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:13,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:13,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T08:47:13,860 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:13,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:13,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T08:47:13,862 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:13,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:13,862 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,863 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,863 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,864 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,864 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,865 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T08:47:13,866 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:13,868 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:47:13,869 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776528, jitterRate=-0.012594014406204224}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T08:47:13,870 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732006033854Initializing all the Stores at 1732006033855 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006033855Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006033855Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006033855Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006033855Cleaning up temporary data from old regions at 1732006033864 (+9 ms)Region opened successfully at 1732006033870 (+6 ms) 2024-11-19T08:47:13,870 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T08:47:13,874 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79994f0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:47:13,875 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T08:47:13,875 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T08:47:13,875 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T08:47:13,875 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T08:47:13,876 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T08:47:13,876 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T08:47:13,876 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T08:47:13,878 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T08:47:13,878 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T08:47:13,889 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T08:47:13,889 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T08:47:13,890 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T08:47:13,897 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T08:47:13,897 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T08:47:13,898 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T08:47:13,905 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T08:47:13,906 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T08:47:13,914 DEBUG 
[master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T08:47:13,918 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T08:47:13,928 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T08:47:13,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:13,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:13,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,940 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3ab37fa97a98,39659,1732006033548, sessionid=0x10153904b430000, setting cluster-up flag (Was=false) 2024-11-19T08:47:13,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:13,981 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T08:47:13,983 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:14,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:14,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:14,031 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T08:47:14,035 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:14,039 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T08:47:14,042 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:14,043 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T08:47:14,043 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T08:47:14,044 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3ab37fa97a98,39659,1732006033548 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T08:47:14,045 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:14,045 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:14,045 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:14,046 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:14,046 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3ab37fa97a98:0, corePoolSize=10, maxPoolSize=10 2024-11-19T08:47:14,046 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,046 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:47:14,046 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T08:47:14,048 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:14,048 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T08:47:14,050 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,050 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T08:47:14,054 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732006064054 2024-11-19T08:47:14,054 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T08:47:14,054 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T08:47:14,054 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T08:47:14,054 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T08:47:14,054 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T08:47:14,054 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T08:47:14,055 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,055 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T08:47:14,055 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T08:47:14,055 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T08:47:14,056 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T08:47:14,056 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T08:47:14,056 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006034056,5,FailOnTimeoutGroup] 2024-11-19T08:47:14,057 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006034056,5,FailOnTimeoutGroup] 2024-11-19T08:47:14,057 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,058 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T08:47:14,058 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,058 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
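The LogsCleaner/HFileCleaner/ReplicationBarrierCleaner/SnapshotCleaner lines above are ScheduledChore instances registered with the master's ChoreService (a name plus a period in milliseconds). A rough sketch of that scheduling pattern, assuming the public ChoreService/ScheduledChore/Stoppable API; the chore body, the class names, and the one-second sleep are placeholders, not the real cleaner logic:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class CleanerChoreSketch {

        // Minimal stand-in for the cleaners the master schedules above.
        static final class LogCleanerLikeChore extends ScheduledChore {
            LogCleanerLikeChore(Stoppable stopper) {
                super("LogsCleaner-sketch", stopper, 600_000); // same 600000 ms period as logged
            }
            @Override
            protected void chore() {
                System.out.println("would scan oldWALs and delete expired files here");
            }
        }

        public static void main(String[] args) throws InterruptedException {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };

            ChoreService choreService = new ChoreService("master-sketch");
            choreService.scheduleChore(new LogCleanerLikeChore(stopper));
            Thread.sleep(1_000);
            choreService.shutdown();
        }
    }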
2024-11-19T08:47:14,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:47:14,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:47:14,060 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T08:47:14,061 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252 2024-11-19T08:47:14,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:47:14,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:47:14,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:14,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:47:14,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:47:14,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:14,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:47:14,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:47:14,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:14,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:47:14,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:47:14,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:14,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:47:14,084 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:47:14,084 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:14,085 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:47:14,085 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740 2024-11-19T08:47:14,086 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740 2024-11-19T08:47:14,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:47:14,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:47:14,087 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
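The FlushLargeStoresPolicy fallback above is simple arithmetic: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the lower bound becomes the region's memstore flush size divided by the number of families — 128 MB / 4 = 32 MB for master:store earlier, and the 16 MB logged here for hbase:meta's four families implies a 64 MB flush size for that region in this setup. A sketch of that computation, plus setting the bound explicitly on a hypothetical table descriptor using the key named in the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
        public static void main(String[] args) {
            // Fallback used above: memstore flush size / number of families.
            long memstoreFlushSize = 128L * 1024 * 1024; // master:store case from the log
            int families = 4;
            System.out.println("fallback lower bound = " + (memstoreFlushSize / families)); // 33554432

            // Setting the bound explicitly on a hypothetical table instead of relying on the fallback.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(32L * 1024 * 1024))
                .build();
            System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }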
2024-11-19T08:47:14,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:47:14,090 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:47:14,091 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720824, jitterRate=-0.0834258496761322}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:47:14,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732006034073Initializing all the Stores at 1732006034074 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006034074Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006034077 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006034077Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006034077Cleaning up temporary data from old regions at 1732006034087 (+10 ms)Region opened successfully at 1732006034091 (+4 ms) 2024-11-19T08:47:14,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:47:14,091 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:47:14,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:47:14,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:47:14,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:47:14,092 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:47:14,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006034091Disabling compacts and flushes for region at 1732006034091Disabling writes for close at 1732006034091Writing 
region close event to WAL at 1732006034092 (+1 ms)Closed at 1732006034092 2024-11-19T08:47:14,093 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:14,093 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T08:47:14,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T08:47:14,094 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:47:14,095 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T08:47:14,131 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(746): ClusterId : 03fa1691-d517-4258-94b2-77e236eb458e 2024-11-19T08:47:14,132 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:47:14,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:14,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:14,157 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:47:14,157 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:47:14,165 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:47:14,166 DEBUG [RS:0;3ab37fa97a98:45945 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57f3ba8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:47:14,182 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3ab37fa97a98:45945 2024-11-19T08:47:14,182 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:47:14,182 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:47:14,182 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T08:47:14,183 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,39659,1732006033548 with port=45945, startcode=1732006033709 2024-11-19T08:47:14,184 DEBUG [RS:0;3ab37fa97a98:45945 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:47:14,186 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57939, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:47:14,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39659 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39659 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,188 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252 2024-11-19T08:47:14,188 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36463 2024-11-19T08:47:14,189 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:47:14,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:47:14,198 DEBUG [RS:0;3ab37fa97a98:45945 {}] zookeeper.ZKUtil(111): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,198 WARN [RS:0;3ab37fa97a98:45945 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
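The two Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils still polling the old mini-cluster's WAL files after that cluster's DFSClient has been shut down, so DistributedFileSystem.isFileClosed throws "Filesystem closed". A simplified sketch of the recover-then-poll pattern the stack trace points at, written against the public HDFS API with a hypothetical path and sleep interval; this is not the actual RecoverLeaseFSUtils implementation:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode and WAL path modeled on the log above; both are illustrative.
        URI nameNode = URI.create("hdfs://localhost:36955");
        Path wal = new Path("/user/jenkins/test-data/example/WALs/host,36037,1/example.wal");
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(nameNode, conf);
        boolean recovered = dfs.recoverLease(wal);      // ask the NameNode to start lease recovery
        while (!recovered && !dfs.isFileClosed(wal)) {  // poll until the file is closed
          Thread.sleep(1000L);
          recovered = dfs.recoverLease(wal);
        }
        // If the DFSClient is closed while this loop runs, isFileClosed() throws
        // IOException("Filesystem closed") -- the failure reported in the warnings above.
      }
    }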
2024-11-19T08:47:14,198 INFO [RS:0;3ab37fa97a98:45945 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:47:14,198 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,198 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,45945,1732006033709] 2024-11-19T08:47:14,202 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:47:14,204 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:47:14,204 INFO [RS:0;3ab37fa97a98:45945 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:47:14,204 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,204 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:47:14,205 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:47:14,205 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:47:14,206 DEBUG [RS:0;3ab37fa97a98:45945 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:47:14,207 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,207 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,207 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,207 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,207 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,207 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,45945,1732006033709-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:47:14,221 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:47:14,221 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,45945,1732006033709-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,221 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,221 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.Replication(171): 3ab37fa97a98,45945,1732006033709 started 2024-11-19T08:47:14,234 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
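The executor services and chores started above all follow the same shape: a named pool with fixed core/max sizes, plus periodic tasks defined by a period and unit (CompactionChecker every 1000 ms, CompactedHFilesCleaner every 120000 ms, and so on). A tiny JDK analogy of that periodic-chore pattern, not HBase's own ChoreService API:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);
        // period=1000, unit=MILLISECONDS, mirroring the CompactionChecker entry above
        chores.scheduleAtFixedRate(
            () -> System.out.println("check stores for compaction work"),
            1000L, 1000L, TimeUnit.MILLISECONDS);
        // period=120000, unit=MILLISECONDS, mirroring CompactedHFilesCleaner
        chores.scheduleAtFixedRate(
            () -> System.out.println("discharge compacted store files"),
            120000L, 120000L, TimeUnit.MILLISECONDS);
      }
    }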
2024-11-19T08:47:14,234 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,45945,1732006033709, RpcServer on 3ab37fa97a98/172.17.0.2:45945, sessionid=0x10153904b430001 2024-11-19T08:47:14,234 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:47:14,234 DEBUG [RS:0;3ab37fa97a98:45945 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,234 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,45945,1732006033709' 2024-11-19T08:47:14,234 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:47:14,235 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:47:14,236 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:47:14,236 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:47:14,236 DEBUG [RS:0;3ab37fa97a98:45945 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,236 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,45945,1732006033709' 2024-11-19T08:47:14,236 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:47:14,236 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:47:14,237 DEBUG [RS:0;3ab37fa97a98:45945 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:47:14,237 INFO [RS:0;3ab37fa97a98:45945 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:47:14,237 INFO [RS:0;3ab37fa97a98:45945 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T08:47:14,245 WARN [3ab37fa97a98:39659 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-19T08:47:14,339 INFO [RS:0;3ab37fa97a98:45945 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C45945%2C1732006033709, suffix=, logDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709, archiveDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/oldWALs, maxLogs=32 2024-11-19T08:47:14,340 INFO [RS:0;3ab37fa97a98:45945 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:14,352 INFO [RS:0;3ab37fa97a98:45945 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:14,354 DEBUG [RS:0;3ab37fa97a98:45945 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41245:41245),(127.0.0.1/127.0.0.1:37891:37891)] 2024-11-19T08:47:14,496 DEBUG [3ab37fa97a98:39659 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T08:47:14,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,499 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,45945,1732006033709, state=OPENING 2024-11-19T08:47:14,520 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T08:47:14,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:14,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:14,533 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:47:14,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,45945,1732006033709}] 2024-11-19T08:47:14,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:14,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:14,691 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T08:47:14,697 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50581, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T08:47:14,702 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T08:47:14,702 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:47:14,704 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C45945%2C1732006033709.meta, suffix=.meta, logDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709, archiveDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/oldWALs, maxLogs=32 2024-11-19T08:47:14,705 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta 2024-11-19T08:47:14,712 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta 2024-11-19T08:47:14,714 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37891:37891),(127.0.0.1/127.0.0.1:41245:41245)] 2024-11-19T08:47:14,715 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:47:14,715 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T08:47:14,715 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T08:47:14,715 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
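The meta WAL above is opened with blocksize=256 MB, rollsize=128 MB and maxLogs=32, i.e. the roll size is half the configured block size. A hedged sketch of the site keys that typically drive those numbers; the key names are assumed here, not confirmed by this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50 % of it
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // matches maxLogs=32 above
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0L)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("roll size = " + rollSize + " bytes"); // 134217728 = 128 MB
      }
    }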
2024-11-19T08:47:14,716 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T08:47:14,716 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:14,716 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T08:47:14,716 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T08:47:14,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:47:14,719 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:47:14,719 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:14,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:47:14,720 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:47:14,721 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,721 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:14,721 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:47:14,722 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:47:14,722 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:14,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:47:14,723 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:47:14,723 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,723 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
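Every store opened above logs the same CompactionConfiguration: minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2, off-peak ratio=5.0, minCompactSize=128 MB, with ExploringCompactionPolicy and an ExponentialCompactionWindowFactory for tiered windows. A sketch of the site-level keys that commonly map to those values (key names are assumptions; the values are copied from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
      }
    }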
2024-11-19T08:47:14,724 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:47:14,724 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740 2024-11-19T08:47:14,726 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740 2024-11-19T08:47:14,727 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:47:14,727 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:47:14,728 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:47:14,729 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:47:14,730 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789538, jitterRate=0.003950640559196472}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:47:14,730 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T08:47:14,731 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732006034716Writing region info on filesystem at 1732006034716Initializing all the Stores at 1732006034718 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006034718Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006034718Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006034718Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006034718Cleaning up temporary data from old regions at 1732006034727 (+9 ms)Running coprocessor post-open hooks at 1732006034730 (+3 ms)Region opened successfully at 1732006034731 (+1 ms) 2024-11-19T08:47:14,732 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732006034690 2024-11-19T08:47:14,734 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T08:47:14,734 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T08:47:14,735 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,736 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,45945,1732006033709, state=OPEN 2024-11-19T08:47:14,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:47:14,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:47:14,782 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:14,782 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:14,782 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:14,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T08:47:14,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,45945,1732006033709 in 249 msec 2024-11-19T08:47:14,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T08:47:14,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 695 msec 2024-11-19T08:47:14,795 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:14,795 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T08:47:14,797 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:47:14,797 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,45945,1732006033709, seqNum=-1] 2024-11-19T08:47:14,798 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:47:14,799 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49413, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:47:14,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 764 msec 2024-11-19T08:47:14,807 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732006034806, completionTime=-1 2024-11-19T08:47:14,807 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T08:47:14,807 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T08:47:14,809 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T08:47:14,809 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732006094809 2024-11-19T08:47:14,809 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732006154809 2024-11-19T08:47:14,809 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T08:47:14,810 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39659,1732006033548-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,810 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39659,1732006033548-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,810 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39659,1732006033548-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,810 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3ab37fa97a98:39659, period=300000, unit=MILLISECONDS is enabled. 
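After meta comes online, InitMetaProcedure above creates the 'default' and 'hbase' namespaces before the master declares initialization complete. The client-side equivalent for a user namespace looks roughly like this (the namespace name is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Creates a namespace, analogous to the 'default'/'hbase' namespaces created above.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }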
2024-11-19T08:47:14,810 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,811 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:14,813 DEBUG [master/3ab37fa97a98:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.043sec 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39659,1732006033548-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:47:14,816 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39659,1732006033548-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T08:47:14,820 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T08:47:14,820 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T08:47:14,820 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39659,1732006033548-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:47:14,832 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@791546e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:47:14,832 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3ab37fa97a98,39659,-1 for getting cluster id 2024-11-19T08:47:14,833 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T08:47:14,835 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '03fa1691-d517-4258-94b2-77e236eb458e' 2024-11-19T08:47:14,836 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T08:47:14,836 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "03fa1691-d517-4258-94b2-77e236eb458e" 2024-11-19T08:47:14,836 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c3e09de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:47:14,837 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3ab37fa97a98,39659,-1] 2024-11-19T08:47:14,837 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T08:47:14,837 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:14,840 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T08:47:14,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e05265c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:47:14,842 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:47:14,843 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,45945,1732006033709, seqNum=-1] 2024-11-19T08:47:14,844 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:47:14,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37474, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:47:14,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:14,849 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:14,853 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T08:47:14,853 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-19T08:47:14,853 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-19T08:47:14,853 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T08:47:14,854 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:14,854 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1357bafd 2024-11-19T08:47:14,854 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T08:47:14,856 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T08:47:14,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39659 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T08:47:14,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
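The two TableDescriptorChecker warnings above fire because the create request sets a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes), evidently chosen by the log-rolling test so splits and flushes happen quickly. A sketch of how a descriptor with those limits is built on the client side, mirroring the create request that follows; the exact builder calls are assumed rather than taken from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallLimitsTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                     // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)     // BLOOMFILTER => 'ROW'
                  .build())
              .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE warning above
              .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE warning above
              .build());
        }
      }
    }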
2024-11-19T08:47:14,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39659 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:47:14,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39659 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T08:47:14,860 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T08:47:14,860 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:14,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39659 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-19T08:47:14,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39659 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:47:14,861 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T08:47:14,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741835_1011 (size=395) 2024-11-19T08:47:14,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741835_1011 (size=395) 2024-11-19T08:47:14,870 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 92ae1b2054ff9774fd79c5cd4bed1134, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252 2024-11-19T08:47:14,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741836_1012 (size=78) 2024-11-19T08:47:14,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37513 is added to blk_1073741836_1012 (size=78) 2024-11-19T08:47:14,878 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:14,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 92ae1b2054ff9774fd79c5cd4bed1134, disabling compactions & flushes 2024-11-19T08:47:14,878 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:14,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:14,879 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. after waiting 0 ms 2024-11-19T08:47:14,879 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:14,879 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:14,879 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 92ae1b2054ff9774fd79c5cd4bed1134: Waiting for close lock at 1732006034878Disabling compacts and flushes for region at 1732006034878Disabling writes for close at 1732006034879 (+1 ms)Writing region close event to WAL at 1732006034879Closed at 1732006034879 2024-11-19T08:47:14,880 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T08:47:14,881 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732006034880"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732006034880"}]},"ts":"1732006034880"} 2024-11-19T08:47:14,883 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
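With the region info row written to hbase:meta above, CreateTableProcedure hands off to a TransitRegionStateProcedure that assigns region 92ae1b2054ff9774fd79c5cd4bed1134 to 3ab37fa97a98,45945,1732006033709 (visible just below). Once assignment completes, a client can observe the placement through the RegionLocator API; a small sketch, assuming default connection settings:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // e.g. "92ae1b2054ff9774fd79c5cd4bed1134 -> 3ab37fa97a98,45945,1732006033709"
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }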
2024-11-19T08:47:14,884 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T08:47:14,885 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732006034884"}]},"ts":"1732006034884"} 2024-11-19T08:47:14,887 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-19T08:47:14,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=92ae1b2054ff9774fd79c5cd4bed1134, ASSIGN}] 2024-11-19T08:47:14,888 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=92ae1b2054ff9774fd79c5cd4bed1134, ASSIGN 2024-11-19T08:47:14,889 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=92ae1b2054ff9774fd79c5cd4bed1134, ASSIGN; state=OFFLINE, location=3ab37fa97a98,45945,1732006033709; forceNewPlan=false, retain=false 2024-11-19T08:47:15,041 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=92ae1b2054ff9774fd79c5cd4bed1134, regionState=OPENING, regionLocation=3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:15,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=92ae1b2054ff9774fd79c5cd4bed1134, ASSIGN because future has completed 2024-11-19T08:47:15,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 92ae1b2054ff9774fd79c5cd4bed1134, server=3ab37fa97a98,45945,1732006033709}] 2024-11-19T08:47:15,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:15,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:15,210 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:15,210 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 92ae1b2054ff9774fd79c5cd4bed1134, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:47:15,211 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,211 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:15,211 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,211 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,213 INFO [StoreOpener-92ae1b2054ff9774fd79c5cd4bed1134-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,214 INFO [StoreOpener-92ae1b2054ff9774fd79c5cd4bed1134-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92ae1b2054ff9774fd79c5cd4bed1134 columnFamilyName info 2024-11-19T08:47:15,214 DEBUG [StoreOpener-92ae1b2054ff9774fd79c5cd4bed1134-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
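The recurring Close-WAL-Writer-0 WARN lines above (and the identical ones that follow, roughly once per second) come from lease recovery on two stale WAL files whose DFSClient appears to have already been shut down, so every isFileClosed probe fails with "Filesystem closed". The following is a simplified sketch of that recover-then-poll pattern using HDFS's DistributedFileSystem API; it illustrates the retry loop implied by the log timing and is not the RecoverLeaseFSUtils implementation (method names and the one-second interval are assumptions based on the timestamps above):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease, then poll until the file reports closed
      // or the deadline passes. When the underlying DFSClient is already closed, every
      // poll throws IOException("Filesystem closed"), matching the WARNs in this log.
      static boolean waitUntilClosed(DistributedFileSystem dfs, Path walFile, long timeoutMs)
          throws InterruptedException {
        try {
          dfs.recoverLease(walFile);               // best-effort lease recovery request
        } catch (IOException ignored) {
          // failures here are reported by the poll loop below
        }
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            if (dfs.isFileClosed(walFile)) {       // same call seen in the stack traces above
              return true;                         // lease recovered, file is closed
            }
          } catch (IOException e) {
            // e.g. "Filesystem closed" when the DFSClient behind dfs has been shut down
            System.err.println("Failed invocation for " + walFile + ": " + e.getMessage());
          }
          Thread.sleep(1000);                      // the WARNs above repeat about once per second
        }
        return false;
      }
    }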
2024-11-19T08:47:15,215 INFO [StoreOpener-92ae1b2054ff9774fd79c5cd4bed1134-1 {}] regionserver.HStore(327): Store=92ae1b2054ff9774fd79c5cd4bed1134/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:15,215 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,216 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,217 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,217 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,217 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,219 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,221 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:47:15,221 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 92ae1b2054ff9774fd79c5cd4bed1134; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860526, jitterRate=0.09421634674072266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T08:47:15,222 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:15,222 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 92ae1b2054ff9774fd79c5cd4bed1134: Running coprocessor pre-open hook at 1732006035211Writing region info on filesystem at 1732006035211Initializing all the Stores at 1732006035212 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006035212Cleaning up temporary data from 
old regions at 1732006035217 (+5 ms)Running coprocessor post-open hooks at 1732006035222 (+5 ms)Region opened successfully at 1732006035222 2024-11-19T08:47:15,223 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134., pid=6, masterSystemTime=1732006035206 2024-11-19T08:47:15,225 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:15,225 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:15,226 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=92ae1b2054ff9774fd79c5cd4bed1134, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:15,228 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 92ae1b2054ff9774fd79c5cd4bed1134, server=3ab37fa97a98,45945,1732006033709 because future has completed 2024-11-19T08:47:15,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T08:47:15,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 92ae1b2054ff9774fd79c5cd4bed1134, server=3ab37fa97a98,45945,1732006033709 in 181 msec 2024-11-19T08:47:15,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T08:47:15,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=92ae1b2054ff9774fd79c5cd4bed1134, ASSIGN in 345 msec 2024-11-19T08:47:15,236 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T08:47:15,237 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732006035237"}]},"ts":"1732006035237"} 2024-11-19T08:47:15,239 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-19T08:47:15,240 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T08:47:15,242 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 383 msec 2024-11-19T08:47:16,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:16,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:17,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:17,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:18,001 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:47:18,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,033 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,033 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,033 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,034 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:18,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:18,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:19,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:19,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:20,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:20,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:20,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T08:47:20,203 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-19T08:47:21,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:21,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:22,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:22,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:22,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:47:22,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T08:47:22,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T08:47:22,493 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-19T08:47:22,494 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:47:22,494 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T08:47:22,494 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T08:47:22,494 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T08:47:23,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:23,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:24,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:24,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:24,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39659 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:47:24,961 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-19T08:47:24,961 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-19T08:47:24,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T08:47:24,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:24,970 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134., hostname=3ab37fa97a98,45945,1732006033709, seqNum=2] 2024-11-19T08:47:25,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:25,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:26,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:26,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:26,973 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:26,974 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:26,974 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:26,974 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:37513,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:26,974 WARN [DataStreamer for file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta block BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK], DatanodeInfoWithStorage[127.0.0.1:37513,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37513,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]) is bad. 2024-11-19T08:47:26,974 WARN [DataStreamer for file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 block BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37513,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK], DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37513,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]) is bad. 2024-11-19T08:47:26,975 WARN [DataStreamer for file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 block BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37513,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK], DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37513,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]) is bad. 2024-11-19T08:47:26,974 WARN [PacketResponder: BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37513] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:26,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1799322033_22 at /127.0.0.1:51158 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37513:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51158 dst: /127.0.0.1:37513 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:26,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:55862 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55862 dst: /127.0.0.1:44705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:26,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1799322033_22 at /127.0.0.1:55832 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55832 dst: /127.0.0.1:44705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:26,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:51184 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37513:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51184 dst: /127.0.0.1:37513 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:26,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:55878 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55878 dst: /127.0.0.1:44705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:26,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:51190 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37513:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51190 dst: /127.0.0.1:37513 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:27,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ab3aff0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:27,035 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69606aa6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:27,035 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:27,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1afad5ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:27,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@582f5b19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:27,037 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:27,037 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T08:47:27,037 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1281909225-172.17.0.2-1732006031857 (Datanode Uuid 8c4ebc63-1b63-448c-b11d-922067cd4f1c) service to localhost/127.0.0.1:36463 2024-11-19T08:47:27,037 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:27,037 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data3/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:27,038 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data4/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:27,038 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:27,045 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:27,050 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:27,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:27,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:27,051 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:47:27,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38091200{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:27,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37755ef6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:27,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-19T08:47:27,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:27,191 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e6faf55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir/jetty-localhost-43867-hadoop-hdfs-3_4_1-tests_jar-_-any-4550415784812193617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:27,192 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@224c49fc{HTTP/1.1, (http/1.1)}{localhost:43867} 2024-11-19T08:47:27,192 INFO [Time-limited test {}] server.Server(415): Started @171720ms 2024-11-19T08:47:27,194 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T08:47:27,232 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:27,232 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:27,232 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:27,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:56450 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56450 dst: /127.0.0.1:44705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:27,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1799322033_22 at /127.0.0.1:56458 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56458 dst: /127.0.0.1:44705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:27,234 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:56452 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56452 dst: /127.0.0.1:44705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:27,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@667c15a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:27,248 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@633966fa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:27,248 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:27,248 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5260e8b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:27,248 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@675d0ec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:27,249 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T08:47:27,249 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:27,250 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:27,250 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1281909225-172.17.0.2-1732006031857 (Datanode Uuid f95d6baa-f132-465b-b24f-1a4a66163a87) service to localhost/127.0.0.1:36463 2024-11-19T08:47:27,250 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data1/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:27,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data2/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:27,251 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:27,270 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:27,275 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:27,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:27,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:27,277 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:47:27,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39c3cb7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:27,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71cc65b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:27,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e0e9550{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir/jetty-localhost-41699-hadoop-hdfs-3_4_1-tests_jar-_-any-5595111618879509607/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:27,418 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@747d6bc2{HTTP/1.1, (http/1.1)}{localhost:41699} 2024-11-19T08:47:27,418 INFO [Time-limited test {}] server.Server(415): Started @171946ms 2024-11-19T08:47:27,420 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:47:27,624 WARN [Thread-1334 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:47:27,627 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c8c04b8e805304f with lease ID 0x188196281fb58ef7: from storage DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed node DatanodeRegistration(127.0.0.1:33071, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=35115, infoSecurePort=0, ipcPort=42615, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:27,627 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c8c04b8e805304f with lease ID 0x188196281fb58ef7: from storage DS-a8cb0989-22fb-438d-bb95-7e2e60a583ab node DatanodeRegistration(127.0.0.1:33071, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=35115, infoSecurePort=0, ipcPort=42615, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:27,803 WARN [Thread-1355 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:47:27,806 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x203faa1a600cc462 with lease ID 0x188196281fb58ef8: from storage DS-fa53e452-9594-4d74-a5ce-dca1a1394415 node DatanodeRegistration(127.0.0.1:43047, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=38435, infoSecurePort=0, ipcPort=41361, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:27,806 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x203faa1a600cc462 with lease ID 0x188196281fb58ef8: from storage DS-6979ebd7-950b-4396-baa6-79da31b047ec node DatanodeRegistration(127.0.0.1:43047, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=38435, infoSecurePort=0, ipcPort=41361, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:28,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:28,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T08:47:28,484 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted
2024-11-19T08:47:28,486 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002
2024-11-19T08:47:28,489 ERROR [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T08:47:28,489 WARN [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
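The two "All datanodes [...] are bad. Aborting..." stack traces above are the HDFS client abandoning pipeline recovery for the open WAL block: in a two-datanode mini cluster there is no spare node to swap into the write pipeline after one datanode restarts. The client-side behaviour here is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the sketch below only illustrates how those standard HDFS client keys are set on a Configuration, and the surrounding class is hypothetical rather than part of this test.

```java
import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryPolicyExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With only two or three datanodes, the DEFAULT replacement policy can fail an
    // append as soon as one pipeline node dies, because there is no node to add.
    // NEVER tells the client to keep writing to the surviving pipeline instead.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    // Or keep DEFAULT but tolerate a failed replacement attempt:
    // conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
  }
}
```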
2024-11-19T08:47:28,490 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C45945%2C1732006033709:(num 1732006034340) roll requested 2024-11-19T08:47:28,490 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:28,497 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 newFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:28,498 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:28,498 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:28,498 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:28,498 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:28,498 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:28,498 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:28,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:28,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:28,499 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:28,500 WARN [IPC Server handler 1 on default port 36463 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-19T08:47:28,500 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 after 1ms 2024-11-19T08:47:28,503 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35115:35115),(127.0.0.1/127.0.0.1:38435:38435)] 2024-11-19T08:47:28,503 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 is not closed yet, will try archiving it next time 2024-11-19T08:47:29,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:29,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:30,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:30,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:30,508 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-19T08:47:30,628 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T08:47:31,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:31,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:32,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:32,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:32,501 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 after 4002ms 2024-11-19T08:47:32,511 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:43047,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:32,512 WARN [DataStreamer for file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 block BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33071,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK], DatanodeInfoWithStorage[127.0.0.1:43047,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43047,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]) is bad. 2024-11-19T08:47:32,512 WARN [PacketResponder: BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43047] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] 
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:32,512 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:36760 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33071:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36760 dst: /127.0.0.1:33071 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:32,513 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:58006 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43047:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58006 dst: /127.0.0.1:43047 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
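The recurring RecoverLeaseFSUtils warnings in this run, together with the earlier "Recovered lease, attempt=1 ... after 4002ms" entry, show the WAL close path asking the NameNode to revoke the old writer's lease and then polling until the file is reported closed (the "Filesystem closed" cause simply means that particular DFSClient had already been shut down). A minimal sketch of that recover-then-poll pattern against the public DistributedFileSystem API follows; the URI, path, and timeout are illustrative, and this is not the HBase utility itself.

```java
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryExample {
  /** Ask the NameNode to start lease recovery, then poll until the file is closed. */
  static void recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean closed = dfs.recoverLease(wal);   // true if the file is already closed
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                    // back off between attempts
      closed = dfs.isFileClosed(wal);         // cheap NameNode-side check
    }
    if (!closed) {
      throw new IOException("Timed out recovering lease on " + wal);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical cluster address and WAL path, for illustration only.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      recoverLease((DistributedFileSystem) fs, new Path("/tmp/example-wal"), 60_000L);
    }
  }
}
```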
2024-11-19T08:47:32,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e0e9550{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:32,515 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@747d6bc2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:32,515 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:32,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71cc65b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:32,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39c3cb7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:32,517 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T08:47:32,517 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:32,517 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:32,517 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1281909225-172.17.0.2-1732006031857 (Datanode Uuid f95d6baa-f132-465b-b24f-1a4a66163a87) service to localhost/127.0.0.1:36463 2024-11-19T08:47:32,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data1/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:32,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data2/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:32,518 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:32,536 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:32,540 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:32,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:32,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:32,543 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:47:32,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431ff48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:32,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc90b47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:32,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@89d2ae2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir/jetty-localhost-43653-hadoop-hdfs-3_4_1-tests_jar-_-any-16307239303982266661/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:32,657 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ad33209{HTTP/1.1, (http/1.1)}{localhost:43653} 2024-11-19T08:47:32,657 INFO [Time-limited test {}] server.Server(415): Started @177185ms 2024-11-19T08:47:32,659 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:47:32,687 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:32,687 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1010737429_22 at /127.0.0.1:59092 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33071:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59092 dst: /127.0.0.1:33071 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:47:32,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e6faf55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:32,705 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@224c49fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:32,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:32,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37755ef6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:32,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38091200{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:32,707 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:32,707 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:47:32,707 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:32,707 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1281909225-172.17.0.2-1732006031857 (Datanode Uuid 8c4ebc63-1b63-448c-b11d-922067cd4f1c) service to localhost/127.0.0.1:36463 2024-11-19T08:47:32,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data3/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:32,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data4/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:32,708 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:32,724 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:32,727 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:32,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:32,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:32,728 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:47:32,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70cdb622{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:32,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78fa9015{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:32,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3de4d535{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/java.io.tmpdir/jetty-localhost-42125-hadoop-hdfs-3_4_1-tests_jar-_-any-1185598384708127977/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:32,830 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@255f545b{HTTP/1.1, 
(http/1.1)}{localhost:42125} 2024-11-19T08:47:32,830 INFO [Time-limited test {}] server.Server(415): Started @177358ms 2024-11-19T08:47:32,832 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:47:33,086 WARN [Thread-1408 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:47:33,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ec0c6d37b772fc2 with lease ID 0x188196281fb58ef9: from storage DS-fa53e452-9594-4d74-a5ce-dca1a1394415 node DatanodeRegistration(127.0.0.1:36063, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=38195, infoSecurePort=0, ipcPort=36019, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:33,088 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ec0c6d37b772fc2 with lease ID 0x188196281fb58ef9: from storage DS-6979ebd7-950b-4396-baa6-79da31b047ec node DatanodeRegistration(127.0.0.1:36063, datanodeUuid=f95d6baa-f132-465b-b24f-1a4a66163a87, infoPort=38195, infoSecurePort=0, ipcPort=36019, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:47:33,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:33,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:33,240 WARN [Thread-1428 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:47:33,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c4ed606d94ab6b9 with lease ID 0x188196281fb58efa: from storage DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed node DatanodeRegistration(127.0.0.1:33889, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=39387, infoSecurePort=0, ipcPort=34129, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:33,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c4ed606d94ab6b9 with lease ID 0x188196281fb58efa: from storage DS-a8cb0989-22fb-438d-bb95-7e2e60a583ab node DatanodeRegistration(127.0.0.1:33889, datanodeUuid=8c4ebc63-1b63-448c-b11d-922067cd4f1c, infoPort=39387, infoSecurePort=0, ipcPort=34129, storageInfo=lv=-57;cid=testClusterID;nsid=2047881705;c=1732006031857), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:47:33,849 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-19T08:47:33,852 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-19T08:47:33,854 ERROR [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33071,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:33,854 WARN [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33071,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:33,854 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C45945%2C1732006033709:(num 1732006048490) roll requested 2024-11-19T08:47:33,855 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C45945%2C1732006033709.1732006053854 2024-11-19T08:47:33,862 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 newFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 2024-11-19T08:47:33,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:33,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:33,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:33,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:33,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:33,862 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 2024-11-19T08:47:33,862 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33071,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:33,863 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33071,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:33,863 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:33,863 WARN [IPC Server handler 3 on default port 36463 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-19T08:47:33,863 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 after 0ms 2024-11-19T08:47:33,866 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38195:38195),(127.0.0.1/127.0.0.1:39387:39387)] 2024-11-19T08:47:33,866 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 is not closed yet, will try archiving it next time 2024-11-19T08:47:34,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:34,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:34,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741837_1020 (size=2427) 2024-11-19T08:47:35,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:35,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:35,868 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:35,879 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 newFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:35,879 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:35,879 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:35,880 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:35,880 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:35,880 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:35,880 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:35,881 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38195:38195),(127.0.0.1/127.0.0.1:39387:39387)] 2024-11-19T08:47:35,881 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 is not closed yet, will try archiving it next time 2024-11-19T08:47:35,881 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 is not closed yet, will try archiving it next time 2024-11-19T08:47:35,882 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:35,882 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:35,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741838_1019 (size=1264) 2024-11-19T08:47:35,882 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741838_1019 (size=1264) 2024-11-19T08:47:35,883 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 after 1ms 2024-11-19T08:47:35,883 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:35,883 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 is not closed yet, will try archiving it next time 2024-11-19T08:47:35,896 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732006035222/Put/vlen=218/seqid=0] 2024-11-19T08:47:35,896 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732006044971/Put/vlen=1045/seqid=0] 2024-11-19T08:47:35,896 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006034340 2024-11-19T08:47:35,896 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:35,896 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:35,897 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 after 1ms 2024-11-19T08:47:35,897 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:35,901 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732006048489/Put/vlen=1045/seqid=0] 2024-11-19T08:47:35,901 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732006050509/Put/vlen=1045/seqid=0] 2024-11-19T08:47:35,901 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 2024-11-19T08:47:35,901 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 2024-11-19T08:47:35,901 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 2024-11-19T08:47:35,902 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 after 1ms 2024-11-19T08:47:35,902 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006053854 2024-11-19T08:47:35,905 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732006053853/Put/vlen=1045/seqid=0] 2024-11-19T08:47:35,905 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:35,905 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:35,906 WARN [IPC Server handler 1 on default port 36463 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-19T08:47:35,906 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 after 1ms 2024-11-19T08:47:36,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:36,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:36,247 WARN [ResponseProcessor for block BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:36,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1799322033_22 at /127.0.0.1:56912 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56912 dst: /127.0.0.1:36063 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36063 remote=/127.0.0.1:56912]. Total timeout mills is 60000, 59632 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:36,247 WARN [DataStreamer for file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 block BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36063,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK], DatanodeInfoWithStorage[127.0.0.1:33889,DS-d4d3f402-eb12-4e47-8720-2371f3cf52ed,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36063,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]) is bad. 2024-11-19T08:47:36,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1799322033_22 at /127.0.0.1:60520 [Receiving block BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60520 dst: /127.0.0.1:33889 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:47:36,248 WARN [DataStreamer for file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 block BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:36,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741839_1022 (size=85) 2024-11-19T08:47:37,089 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T08:47:37,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:37,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:37,864 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006048490 after 4001ms 2024-11-19T08:47:38,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:38,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:39,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:39,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:39,907 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 after 4002ms 2024-11-19T08:47:39,907 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:39,911 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:39,911 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 92ae1b2054ff9774fd79c5cd4bed1134 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-19T08:47:39,912 ERROR [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:39,912 WARN [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:39,912 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C45945%2C1732006033709:(num 1732006055868) roll requested 2024-11-19T08:47:39,913 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C45945%2C1732006033709.1732006059913 2024-11-19T08:47:39,919 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 newFile=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006059913 2024-11-19T08:47:39,919 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,919 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,919 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006059913 2024-11-19T08:47:39,919 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:39,920 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1281909225-172.17.0.2-1732006031857:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:39,920 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:39,921 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 after 1ms 2024-11-19T08:47:39,921 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38195:38195),(127.0.0.1/127.0.0.1:39387:39387)] 2024-11-19T08:47:39,921 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 to hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/oldWALs/3ab37fa97a98%2C45945%2C1732006033709.1732006055868 2024-11-19T08:47:39,938 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134/.tmp/info/d58f298266a84aa884bace850b8f9198 is 1080, key is row1002/info:/1732006044971/Put/seqid=0 2024-11-19T08:47:39,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741841_1024 (size=9270) 2024-11-19T08:47:39,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741841_1024 (size=9270) 2024-11-19T08:47:39,957 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134/.tmp/info/d58f298266a84aa884bace850b8f9198 2024-11-19T08:47:39,965 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134/.tmp/info/d58f298266a84aa884bace850b8f9198 as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134/info/d58f298266a84aa884bace850b8f9198 2024-11-19T08:47:39,971 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134/info/d58f298266a84aa884bace850b8f9198, entries=4, sequenceid=8, filesize=9.1 K 2024-11-19T08:47:39,972 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 92ae1b2054ff9774fd79c5cd4bed1134 in 61ms, sequenceid=8, compaction requested=false 2024-11-19T08:47:39,972 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 92ae1b2054ff9774fd79c5cd4bed1134: 2024-11-19T08:47:39,972 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-19T08:47:39,973 ERROR [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:39,973 WARN [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252-prefix:3ab37fa97a98,45945,1732006033709.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:39,973 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C45945%2C1732006033709.meta:.meta(num 1732006034705) roll requested 2024-11-19T08:47:39,973 INFO [regionserver/3ab37fa97a98:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C45945%2C1732006033709.meta.1732006059973.meta 2024-11-19T08:47:39,989 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,989 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,989 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,989 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,989 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:39,989 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006059973.meta 2024-11-19T08:47:39,989 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:39,990 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:39,990 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta 2024-11-19T08:47:39,990 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39387:39387),(127.0.0.1/127.0.0.1:38195:38195)] 2024-11-19T08:47:39,990 DEBUG [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta is not closed yet, will try archiving it next time 2024-11-19T08:47:39,990 WARN [IPC Server handler 4 on default port 36463 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-19T08:47:39,990 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta after 0ms 2024-11-19T08:47:40,005 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/info/07d1be6bad154dd4b7b413bce1f80ced is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134./info:regioninfo/1732006035226/Put/seqid=0 2024-11-19T08:47:40,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741843_1027 (size=7125) 2024-11-19T08:47:40,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741843_1027 (size=7125) 2024-11-19T08:47:40,011 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/info/07d1be6bad154dd4b7b413bce1f80ced 2024-11-19T08:47:40,033 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/ns/98012ae224864789a537500f03bcabda is 43, key is default/ns:d/1732006034800/Put/seqid=0 2024-11-19T08:47:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741844_1028 (size=5153) 2024-11-19T08:47:40,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741844_1028 (size=5153) 2024-11-19T08:47:40,039 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/ns/98012ae224864789a537500f03bcabda 2024-11-19T08:47:40,061 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/table/0359cc3bbb6048909cd1d6e272b8e14a is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732006035237/Put/seqid=0 2024-11-19T08:47:40,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741845_1029 (size=5438) 2024-11-19T08:47:40,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741845_1029 (size=5438) 2024-11-19T08:47:40,066 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/table/0359cc3bbb6048909cd1d6e272b8e14a 2024-11-19T08:47:40,073 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/info/07d1be6bad154dd4b7b413bce1f80ced as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/info/07d1be6bad154dd4b7b413bce1f80ced 2024-11-19T08:47:40,079 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/info/07d1be6bad154dd4b7b413bce1f80ced, entries=10, sequenceid=11, filesize=7.0 K 2024-11-19T08:47:40,081 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/ns/98012ae224864789a537500f03bcabda as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/ns/98012ae224864789a537500f03bcabda 2024-11-19T08:47:40,088 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/ns/98012ae224864789a537500f03bcabda, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T08:47:40,089 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/.tmp/table/0359cc3bbb6048909cd1d6e272b8e14a as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/table/0359cc3bbb6048909cd1d6e272b8e14a 2024-11-19T08:47:40,096 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/table/0359cc3bbb6048909cd1d6e272b8e14a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T08:47:40,097 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false 2024-11-19T08:47:40,097 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T08:47:40,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T08:47:40,103 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T08:47:40,103 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:47:40,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:40,103 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:40,103 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T08:47:40,103 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T08:47:40,103 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=664067432, stopped=false 2024-11-19T08:47:40,103 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3ab37fa97a98,39659,1732006033548 2024-11-19T08:47:40,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:40,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:40,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:40,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:40,161 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:47:40,161 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T08:47:40,161 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:47:40,161 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:40,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:40,162 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:40,162 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,45945,1732006033709' ***** 2024-11-19T08:47:40,162 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:47:40,162 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:47:40,162 INFO [RS:0;3ab37fa97a98:45945 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T08:47:40,162 INFO [RS:0;3ab37fa97a98:45945 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:47:40,162 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:47:40,162 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(3091): Received CLOSE for 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:40,162 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:40,162 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:47:40,162 INFO [RS:0;3ab37fa97a98:45945 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3ab37fa97a98:45945. 2024-11-19T08:47:40,162 DEBUG [RS:0;3ab37fa97a98:45945 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:47:40,162 DEBUG [RS:0;3ab37fa97a98:45945 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:40,162 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 92ae1b2054ff9774fd79c5cd4bed1134, disabling compactions & flushes 2024-11-19T08:47:40,163 INFO [RS:0;3ab37fa97a98:45945 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T08:47:40,163 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:40,163 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:47:40,163 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T08:47:40,163 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:40,163 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T08:47:40,163 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. after waiting 0 ms 2024-11-19T08:47:40,163 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:40,163 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T08:47:40,163 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1325): Online Regions={92ae1b2054ff9774fd79c5cd4bed1134=TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T08:47:40,163 DEBUG [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 92ae1b2054ff9774fd79c5cd4bed1134 2024-11-19T08:47:40,163 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:47:40,163 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:47:40,163 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:47:40,163 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:47:40,163 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:47:40,167 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/default/TestLogRolling-testLogRollOnPipelineRestart/92ae1b2054ff9774fd79c5cd4bed1134/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-19T08:47:40,167 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T08:47:40,168 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:40,168 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 92ae1b2054ff9774fd79c5cd4bed1134: Waiting for close lock at 1732006060162Running coprocessor pre-close hooks at 1732006060162Disabling compacts and flushes for region at 1732006060162Disabling writes for close at 1732006060163 (+1 ms)Writing region close event to WAL at 1732006060163Running coprocessor post-close hooks at 1732006060168 (+5 ms)Closed at 1732006060168 2024-11-19T08:47:40,168 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:47:40,168 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732006034857.92ae1b2054ff9774fd79c5cd4bed1134. 2024-11-19T08:47:40,168 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:47:40,168 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006060163Running coprocessor pre-close hooks at 1732006060163Disabling compacts and flushes for region at 1732006060163Disabling writes for close at 1732006060163Writing region close event to WAL at 1732006060164 (+1 ms)Running coprocessor post-close hooks at 1732006060168 (+4 ms)Closed at 1732006060168 2024-11-19T08:47:40,168 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T08:47:40,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:40,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:40,208 INFO [regionserver/3ab37fa97a98:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:47:40,215 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T08:47:40,215 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T08:47:40,363 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,45945,1732006033709; all regions closed. 2024-11-19T08:47:40,364 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:40,364 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:40,364 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:40,364 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:40,364 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741842_1025 (size=825) 2024-11-19T08:47:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741842_1025 (size=825) 2024-11-19T08:47:41,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:41,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:42,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:42,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:42,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:47:42,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:47:42,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T08:47:43,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:43,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:43,243 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T08:47:43,529 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T08:47:43,991 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta after 4001ms 2024-11-19T08:47:43,992 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/WALs/3ab37fa97a98,45945,1732006033709/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta to hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/oldWALs/3ab37fa97a98%2C45945%2C1732006033709.meta.1732006034705.meta 2024-11-19T08:47:43,994 DEBUG [RS:0;3ab37fa97a98:45945 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/oldWALs 2024-11-19T08:47:43,994 INFO [RS:0;3ab37fa97a98:45945 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C45945%2C1732006033709.meta:.meta(num 1732006059973) 2024-11-19T08:47:43,995 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:43,998 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:43,998 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:43,999 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:43,999 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741840_1023 (size=1162) 2024-11-19T08:47:44,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741840_1023 (size=1162) 2024-11-19T08:47:44,007 DEBUG [RS:0;3ab37fa97a98:45945 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/oldWALs 2024-11-19T08:47:44,007 INFO [RS:0;3ab37fa97a98:45945 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C45945%2C1732006033709:(num 1732006059913) 2024-11-19T08:47:44,007 DEBUG [RS:0;3ab37fa97a98:45945 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:44,007 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:47:44,007 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:47:44,007 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T08:47:44,007 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.HBaseServerBase(448): Shutdown executor service 
2024-11-19T08:47:44,007 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:47:44,007 INFO [RS:0;3ab37fa97a98:45945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45945 2024-11-19T08:47:44,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,45945,1732006033709 2024-11-19T08:47:44,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:47:44,060 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:47:44,071 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,45945,1732006033709] 2024-11-19T08:47:44,079 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,45945,1732006033709 already deleted, retry=false 2024-11-19T08:47:44,079 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,45945,1732006033709 expired; onlineServers=0 2024-11-19T08:47:44,079 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3ab37fa97a98,39659,1732006033548' ***** 2024-11-19T08:47:44,079 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T08:47:44,079 INFO [M:0;3ab37fa97a98:39659 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:47:44,080 INFO [M:0;3ab37fa97a98:39659 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:47:44,080 DEBUG [M:0;3ab37fa97a98:39659 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T08:47:44,080 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T08:47:44,080 DEBUG [M:0;3ab37fa97a98:39659 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T08:47:44,080 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006034056 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006034056,5,FailOnTimeoutGroup] 2024-11-19T08:47:44,080 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006034056 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006034056,5,FailOnTimeoutGroup] 2024-11-19T08:47:44,080 INFO [M:0;3ab37fa97a98:39659 {}] hbase.ChoreService(370): Chore service for: master/3ab37fa97a98:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T08:47:44,080 INFO [M:0;3ab37fa97a98:39659 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:47:44,080 DEBUG [M:0;3ab37fa97a98:39659 {}] master.HMaster(1795): Stopping service threads 2024-11-19T08:47:44,080 INFO [M:0;3ab37fa97a98:39659 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T08:47:44,080 INFO [M:0;3ab37fa97a98:39659 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:47:44,081 INFO [M:0;3ab37fa97a98:39659 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T08:47:44,081 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T08:47:44,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T08:47:44,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:44,088 DEBUG [M:0;3ab37fa97a98:39659 {}] zookeeper.ZKUtil(347): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T08:47:44,088 WARN [M:0;3ab37fa97a98:39659 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T08:47:44,089 INFO [M:0;3ab37fa97a98:39659 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/.lastflushedseqids 2024-11-19T08:47:44,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741846_1030 (size=111) 2024-11-19T08:47:44,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741846_1030 (size=111) 2024-11-19T08:47:44,096 INFO [M:0;3ab37fa97a98:39659 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T08:47:44,096 INFO [M:0;3ab37fa97a98:39659 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T08:47:44,096 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:47:44,096 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:44,096 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:44,096 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:47:44,096 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:44,096 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-19T08:47:44,097 ERROR [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData-prefix:3ab37fa97a98,39659,1732006033548 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:44,097 WARN [FSHLog-0-hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData-prefix:3ab37fa97a98,39659,1732006033548 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:44,097 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3ab37fa97a98%2C39659%2C1732006033548:(num 1732006033847) roll requested 2024-11-19T08:47:44,097 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C39659%2C1732006033548.1732006064097 2024-11-19T08:47:44,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,103 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,103 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,103 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,103 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006064097 2024-11-19T08:47:44,104 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T08:47:44,104 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44705,DS-fa53e452-9594-4d74-a5ce-dca1a1394415,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T08:47:44,104 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 2024-11-19T08:47:44,104 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39387:39387),(127.0.0.1/127.0.0.1:38195:38195)] 2024-11-19T08:47:44,104 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 is not closed yet, will try archiving it next time 2024-11-19T08:47:44,105 WARN [IPC Server handler 0 on default port 36463 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-19T08:47:44,105 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 after 1ms 2024-11-19T08:47:44,124 DEBUG [M:0;3ab37fa97a98:39659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/de0f20b6f38d43f79f33b5c61722ac09 is 82, key is hbase:meta,,1/info:regioninfo/1732006034735/Put/seqid=0 2024-11-19T08:47:44,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741848_1033 (size=5672) 2024-11-19T08:47:44,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741848_1033 (size=5672) 2024-11-19T08:47:44,129 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/de0f20b6f38d43f79f33b5c61722ac09 2024-11-19T08:47:44,153 DEBUG [M:0;3ab37fa97a98:39659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce51582c6e7147bba6e8f906abe69517 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732006035241/Put/seqid=0 2024-11-19T08:47:44,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741849_1034 (size=6118) 2024-11-19T08:47:44,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741849_1034 (size=6118) 2024-11-19T08:47:44,160 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce51582c6e7147bba6e8f906abe69517 2024-11-19T08:47:44,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:44,171 INFO [RS:0;3ab37fa97a98:45945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:47:44,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45945-0x10153904b430001, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:44,171 INFO [RS:0;3ab37fa97a98:45945 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,45945,1732006033709; zookeeper connection closed. 2024-11-19T08:47:44,172 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@40abe61e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@40abe61e 2024-11-19T08:47:44,172 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T08:47:44,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:44,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:44,182 DEBUG [M:0;3ab37fa97a98:39659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4579e8d4fd43454589bdb4a3e68fc72f is 69, key is 3ab37fa97a98,45945,1732006033709/rs:state/1732006034187/Put/seqid=0 2024-11-19T08:47:44,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741850_1035 (size=5156) 2024-11-19T08:47:44,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741850_1035 (size=5156) 2024-11-19T08:47:44,187 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4579e8d4fd43454589bdb4a3e68fc72f 2024-11-19T08:47:44,209 DEBUG [M:0;3ab37fa97a98:39659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f91a9cf72d7d49e39942b9cd5b7c7a9d is 52, key is load_balancer_on/state:d/1732006034851/Put/seqid=0 2024-11-19T08:47:44,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741851_1036 (size=5056) 2024-11-19T08:47:44,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741851_1036 (size=5056) 2024-11-19T08:47:44,214 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f91a9cf72d7d49e39942b9cd5b7c7a9d 2024-11-19T08:47:44,219 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/de0f20b6f38d43f79f33b5c61722ac09 as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/de0f20b6f38d43f79f33b5c61722ac09 2024-11-19T08:47:44,225 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/de0f20b6f38d43f79f33b5c61722ac09, entries=8, sequenceid=56, filesize=5.5 K 2024-11-19T08:47:44,226 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce51582c6e7147bba6e8f906abe69517 as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce51582c6e7147bba6e8f906abe69517 2024-11-19T08:47:44,231 INFO [M:0;3ab37fa97a98:39659 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce51582c6e7147bba6e8f906abe69517, entries=6, sequenceid=56, filesize=6.0 K 2024-11-19T08:47:44,233 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4579e8d4fd43454589bdb4a3e68fc72f as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4579e8d4fd43454589bdb4a3e68fc72f 2024-11-19T08:47:44,239 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4579e8d4fd43454589bdb4a3e68fc72f, entries=1, sequenceid=56, filesize=5.0 K 2024-11-19T08:47:44,240 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f91a9cf72d7d49e39942b9cd5b7c7a9d as hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f91a9cf72d7d49e39942b9cd5b7c7a9d 2024-11-19T08:47:44,246 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f91a9cf72d7d49e39942b9cd5b7c7a9d, entries=1, sequenceid=56, filesize=4.9 K 2024-11-19T08:47:44,248 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false 2024-11-19T08:47:44,249 INFO [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:44,249 DEBUG [M:0;3ab37fa97a98:39659 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006064096Disabling compacts and flushes for region at 1732006064096Disabling writes for close at 1732006064096Obtaining lock to block concurrent updates at 1732006064096Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732006064096Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732006064097 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732006064105 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732006064105Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732006064123 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732006064123Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732006064135 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732006064153 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732006064153Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732006064166 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732006064181 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732006064181Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732006064193 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732006064208 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732006064208Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68602d93: reopening flushed file at 1732006064219 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23bd5565: reopening flushed file at 1732006064225 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46bbdc12: reopening flushed file at 1732006064232 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4891236d: reopening flushed file at 1732006064239 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false at 1732006064248 (+9 ms)Writing region close event to WAL at 1732006064249 (+1 ms)Closed at 1732006064249 2024-11-19T08:47:44,250 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,250 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,250 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,250 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,250 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:47:44,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36063 is added to blk_1073741847_1031 (size=757) 2024-11-19T08:47:44,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33889 is added to blk_1073741847_1031 (size=757) 2024-11-19T08:47:45,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:45,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:45,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,706 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:47:45,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:45,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:46,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:46,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:46,244 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T08:47:47,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:47,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:48,106 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 after 4002ms 2024-11-19T08:47:48,106 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/WALs/3ab37fa97a98,39659,1732006033548/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 to hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/oldWALs/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 2024-11-19T08:47:48,110 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/MasterData/oldWALs/3ab37fa97a98%2C39659%2C1732006033548.1732006033847 to hdfs://localhost:36463/user/jenkins/test-data/b88b7c14-0901-65ae-7931-29694973a252/oldWALs/3ab37fa97a98%2C39659%2C1732006033548.1732006033847$masterlocalwal$ 2024-11-19T08:47:48,110 INFO [M:0;3ab37fa97a98:39659 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T08:47:48,110 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:47:48,110 INFO [M:0;3ab37fa97a98:39659 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39659 2024-11-19T08:47:48,111 INFO [M:0;3ab37fa97a98:39659 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:47:48,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:48,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:48,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:48,251 INFO [M:0;3ab37fa97a98:39659 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:47:48,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39659-0x10153904b430000, quorum=127.0.0.1:60070, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:47:48,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3de4d535{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:48,255 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@255f545b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:48,255 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:48,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78fa9015{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:48,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70cdb622{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:48,256 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:48,256 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:47:48,256 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:48,256 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1281909225-172.17.0.2-1732006031857 (Datanode Uuid 8c4ebc63-1b63-448c-b11d-922067cd4f1c) service to localhost/127.0.0.1:36463 2024-11-19T08:47:48,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data3/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:48,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data4/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:48,257 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:48,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@89d2ae2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:48,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ad33209{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:48,259 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:48,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cc90b47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:48,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431ff48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:48,261 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:47:48,261 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:47:48,261 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:47:48,261 WARN [BP-1281909225-172.17.0.2-1732006031857 heartbeating to localhost/127.0.0.1:36463 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1281909225-172.17.0.2-1732006031857 (Datanode Uuid f95d6baa-f132-465b-b24f-1a4a66163a87) service to localhost/127.0.0.1:36463 2024-11-19T08:47:48,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data1/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:48,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/cluster_f14576ae-8d26-96c4-d79a-1b1d2b202a8b/data/data2/current/BP-1281909225-172.17.0.2-1732006031857 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:47:48,262 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:47:48,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a083e6f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:47:48,268 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@235bcdaa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:47:48,268 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:47:48,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@515d5cec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:47:48,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a7a3a77{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir/,STOPPED} 2024-11-19T08:47:48,275 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T08:47:48,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T08:47:48,303 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36463 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:36463 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36463 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36463 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36463 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36463 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36463 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36463 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=242 (was 185) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5167 (was 5717) 2024-11-19T08:47:48,312 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=242, ProcessCount=11, AvailableMemoryMB=5167 2024-11-19T08:47:48,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.log.dir so I do NOT create it in target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07224df-7d3d-a1d9-f785-a83f90873e73/hadoop.tmp.dir so I do NOT create it in target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7, deleteOnExit=true 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/test.cache.data in system properties and HBase conf 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir in system properties and HBase conf 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T08:47:48,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T08:47:48,313 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:47:48,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T08:47:48,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/nfs.dump.dir in system properties and HBase conf 2024-11-19T08:47:48,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/java.io.tmpdir in system properties and HBase conf 2024-11-19T08:47:48,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:47:48,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T08:47:48,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T08:47:48,332 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:47:48,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:48,586 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:48,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:48,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:48,593 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:47:48,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:48,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a03003f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:48,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3adf1c78{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:48,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4af78b23{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/java.io.tmpdir/jetty-localhost-46639-hadoop-hdfs-3_4_1-tests_jar-_-any-12244376431420872417/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:47:48,730 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45ec22ac{HTTP/1.1, (http/1.1)}{localhost:46639} 2024-11-19T08:47:48,730 INFO [Time-limited test {}] server.Server(415): Started @193258ms 2024-11-19T08:47:48,748 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:47:48,997 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:49,002 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:49,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:49,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:49,008 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:47:49,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2725554a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:49,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77bac3c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:49,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@454902dc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/java.io.tmpdir/jetty-localhost-45583-hadoop-hdfs-3_4_1-tests_jar-_-any-1654657030172287673/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:49,128 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b792597{HTTP/1.1, (http/1.1)}{localhost:45583} 2024-11-19T08:47:49,128 INFO [Time-limited test {}] server.Server(415): Started @193656ms 2024-11-19T08:47:49,129 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:47:49,169 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:47:49,172 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:47:49,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:47:49,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:47:49,173 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:47:49,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67556f4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:47:49,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b127990{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:47:49,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-19T08:47:49,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:49,284 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2fb77c6e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/java.io.tmpdir/jetty-localhost-33221-hadoop-hdfs-3_4_1-tests_jar-_-any-8006517382295157656/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:47:49,284 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57887c0e{HTTP/1.1, (http/1.1)}{localhost:33221} 2024-11-19T08:47:49,284 INFO [Time-limited test {}] server.Server(415): Started @193812ms 2024-11-19T08:47:49,286 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
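The pair of "Failed invocation" warnings above come from the WAL close path: AbstractFSWAL.recoverLease hands the old WAL file to RecoverLeaseFSUtils, which repeatedly probes the NameNode through a reflective isFileClosed() call; because the previous mini-cluster's DFSClient has already been shut down, every probe throws InvocationTargetException wrapping IOException("Filesystem closed"). A minimal sketch of that call, assuming the RecoverLeaseFSUtils.recoverFileLease(FileSystem, Path, Configuration, CancelableProgressable) signature seen in the stack trace and a placeholder WAL path (the real path is the hdfs://localhost:36955/... file named in the warning):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // In the test this is the DistributedFileSystem of the previous mini cluster,
        // whose DFSClient is already closed; hence the "Filesystem closed" cause above.
        FileSystem fs = FileSystem.get(conf);
        // Placeholder standing in for the logged .../WALs/... file.
        Path oldWal = new Path("/user/jenkins/test-data/WALs/example-wal");
        // Loops over recoverLease()/isFileClosed() until the NameNode reports the file
        // closed; each reflective isFileClosed() probe is what logs the warning above.
        RecoverLeaseFSUtils.recoverFileLease(fs, oldWal, conf, () -> true /* no-op progress reporter */);
      }
    }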
2024-11-19T08:47:49,876 WARN [Thread-1648 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data1/current/BP-324031568-172.17.0.2-1732006068345/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:49,876 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data2/current/BP-324031568-172.17.0.2-1732006068345/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:49,895 WARN [Thread-1612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:47:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb61275ea9b3d72ac with lease ID 0x648f38219333d8ff: Processing first storage report for DS-a34b1134-32b2-46a7-b4db-bd6fb2334d2c from datanode DatanodeRegistration(127.0.0.1:43439, datanodeUuid=eb7a44ac-3990-4446-8cba-d1fc32692a86, infoPort=42747, infoSecurePort=0, ipcPort=37211, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345) 2024-11-19T08:47:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb61275ea9b3d72ac with lease ID 0x648f38219333d8ff: from storage DS-a34b1134-32b2-46a7-b4db-bd6fb2334d2c node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=eb7a44ac-3990-4446-8cba-d1fc32692a86, infoPort=42747, infoSecurePort=0, ipcPort=37211, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb61275ea9b3d72ac with lease ID 0x648f38219333d8ff: Processing first storage report for DS-c25ebcda-7fe6-48a7-8c4d-f8b094e43be0 from datanode DatanodeRegistration(127.0.0.1:43439, datanodeUuid=eb7a44ac-3990-4446-8cba-d1fc32692a86, infoPort=42747, infoSecurePort=0, ipcPort=37211, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345) 2024-11-19T08:47:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb61275ea9b3d72ac with lease ID 0x648f38219333d8ff: from storage DS-c25ebcda-7fe6-48a7-8c4d-f8b094e43be0 node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=eb7a44ac-3990-4446-8cba-d1fc32692a86, infoPort=42747, infoSecurePort=0, ipcPort=37211, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:50,034 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data4/current/BP-324031568-172.17.0.2-1732006068345/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:50,034 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data3/current/BP-324031568-172.17.0.2-1732006068345/current, will proceed with Du for space computation calculation, 2024-11-19T08:47:50,054 WARN [Thread-1635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:47:50,057 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21eed89733284038 with lease ID 0x648f38219333d900: Processing first storage report for DS-26f6bb87-2143-4d43-8fcf-22bca7b0702d from datanode DatanodeRegistration(127.0.0.1:38083, datanodeUuid=fb2b8bba-1ef6-4634-b7be-6d14749bffc2, infoPort=41431, infoSecurePort=0, ipcPort=35683, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345) 2024-11-19T08:47:50,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21eed89733284038 with lease ID 0x648f38219333d900: from storage DS-26f6bb87-2143-4d43-8fcf-22bca7b0702d node DatanodeRegistration(127.0.0.1:38083, datanodeUuid=fb2b8bba-1ef6-4634-b7be-6d14749bffc2, infoPort=41431, infoSecurePort=0, ipcPort=35683, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:50,057 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21eed89733284038 with lease ID 0x648f38219333d900: Processing first storage report for DS-13ac1832-9f8b-4540-8406-477c4b6f1d66 from datanode DatanodeRegistration(127.0.0.1:38083, datanodeUuid=fb2b8bba-1ef6-4634-b7be-6d14749bffc2, infoPort=41431, infoSecurePort=0, ipcPort=35683, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345) 2024-11-19T08:47:50,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21eed89733284038 with lease ID 0x648f38219333d900: from storage DS-13ac1832-9f8b-4540-8406-477c4b6f1d66 node DatanodeRegistration(127.0.0.1:38083, datanodeUuid=fb2b8bba-1ef6-4634-b7be-6d14749bffc2, infoPort=41431, infoSecurePort=0, ipcPort=35683, storageInfo=lv=-57;cid=testClusterID;nsid=1603878361;c=1732006068345), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:47:50,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c 2024-11-19T08:47:50,127 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/zookeeper_0, clientPort=54066, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T08:47:50,128 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54066 2024-11-19T08:47:50,129 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:50,130 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:50,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:47:50,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:47:50,170 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28 with version=8 2024-11-19T08:47:50,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase-staging 2024-11-19T08:47:50,172 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:47:50,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:50,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:50,172 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:47:50,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:50,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:47:50,172 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T08:47:50,172 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:47:50,173 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43785 2024-11-19T08:47:50,174 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43785 connecting to ZooKeeper ensemble=127.0.0.1:54066 2024-11-19T08:47:50,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:50,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:50,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:437850x0, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:47:50,216 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43785-0x1015390da300000 connected 2024-11-19T08:47:50,278 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:50,280 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:50,282 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:50,282 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28, hbase.cluster.distributed=false 2024-11-19T08:47:50,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:47:50,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43785 2024-11-19T08:47:50,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43785 2024-11-19T08:47:50,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43785 2024-11-19T08:47:50,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43785 2024-11-19T08:47:50,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43785 2024-11-19T08:47:50,303 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:47:50,303 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:50,303 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:50,303 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:47:50,303 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:47:50,303 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:47:50,303 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:47:50,303 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:47:50,304 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38843 2024-11-19T08:47:50,306 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38843 connecting to ZooKeeper ensemble=127.0.0.1:54066 2024-11-19T08:47:50,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:50,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:50,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388430x0, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:47:50,320 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:388430x0, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:47:50,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38843-0x1015390da300001 connected 2024-11-19T08:47:50,320 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:47:50,321 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:47:50,322 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T08:47:50,323 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:47:50,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38843 2024-11-19T08:47:50,323 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38843 2024-11-19T08:47:50,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38843 2024-11-19T08:47:50,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38843 2024-11-19T08:47:50,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38843 2024-11-19T08:47:50,341 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3ab37fa97a98:43785 2024-11-19T08:47:50,342 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:50,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:50,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:50,353 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:50,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T08:47:50,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,362 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:47:50,363 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3ab37fa97a98,43785,1732006070171 from backup master directory 2024-11-19T08:47:50,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:50,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:50,370 WARN 
[master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T08:47:50,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:47:50,370 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:50,375 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/hbase.id] with ID: 40f1d1fa-8ae1-4908-91e3-f5ec2faec1fd 2024-11-19T08:47:50,376 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/.tmp/hbase.id 2024-11-19T08:47:50,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:47:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:47:50,386 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/.tmp/hbase.id]:[hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/hbase.id] 2024-11-19T08:47:50,399 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:50,399 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T08:47:50,400 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
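The cluster ID bookkeeping above (util.FSUtils lines 620/625/634) follows the usual HDFS write-then-rename idiom: the id is first written to a file under .tmp and only then renamed onto the final hbase.id path, so readers never observe a half-written file. A rough sketch of that pattern with the plain Hadoop FileSystem API, using placeholder paths and a plain UTF-8 payload rather than the exact helper HBase uses:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        String clusterId = java.util.UUID.randomUUID().toString();
        Path tmp = new Path("/hbase/.tmp/hbase.id");   // placeholder, mirrors the logged .tmp/hbase.id
        Path dst = new Path("/hbase/hbase.id");        // placeholder final location
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // The rename only exposes the id once the temporary file is fully written.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }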
2024-11-19T08:47:50,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:47:50,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:47:50,418 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:47:50,419 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T08:47:50,419 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:47:50,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:47:50,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:47:50,430 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store 2024-11-19T08:47:50,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:47:50,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:47:50,442 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:50,442 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:47:50,442 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:50,442 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:50,442 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:47:50,442 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:47:50,442 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
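The 'master:store' descriptor logged above (families info, proc, rs and state, with ROW_INDEX_V1 encoding, an in-memory flag and a ROWCOL bloom filter only on info) corresponds roughly to what the public descriptor builders would produce. A minimal sketch using the standard client API, spelling out only the attributes that differ from the builder defaults (the logged proc/rs/state settings of 1 version, ROW bloom and 64 KB blocks are those defaults):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' keep the defaults shown in the log entry above
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }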
2024-11-19T08:47:50,442 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006070442Disabling compacts and flushes for region at 1732006070442Disabling writes for close at 1732006070442Writing region close event to WAL at 1732006070442Closed at 1732006070442 2024-11-19T08:47:50,443 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/.initializing 2024-11-19T08:47:50,443 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/WALs/3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:50,446 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C43785%2C1732006070171, suffix=, logDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/WALs/3ab37fa97a98,43785,1732006070171, archiveDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/oldWALs, maxLogs=10 2024-11-19T08:47:50,447 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C43785%2C1732006070171.1732006070446 2024-11-19T08:47:50,456 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/WALs/3ab37fa97a98,43785,1732006070171/3ab37fa97a98%2C43785%2C1732006070171.1732006070446 2024-11-19T08:47:50,457 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42747:42747),(127.0.0.1/127.0.0.1:41431:41431)] 2024-11-19T08:47:50,458 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:47:50,458 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:50,459 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,459 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T08:47:50,461 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:50,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T08:47:50,463 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:50,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T08:47:50,464 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:50,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T08:47:50,466 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:50,466 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,467 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,467 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,469 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,469 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,469 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T08:47:50,470 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:47:50,472 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:47:50,473 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727822, jitterRate=-0.074527308344841}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T08:47:50,474 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732006070459Initializing all the Stores at 1732006070459Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006070460 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006070460Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006070460Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006070460Cleaning up temporary data from old regions at 1732006070469 (+9 ms)Region opened successfully at 1732006070473 (+4 ms) 2024-11-19T08:47:50,474 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T08:47:50,477 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ff8946f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:47:50,478 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T08:47:50,478 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T08:47:50,478 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T08:47:50,479 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T08:47:50,479 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T08:47:50,479 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T08:47:50,479 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T08:47:50,481 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T08:47:50,482 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T08:47:50,495 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T08:47:50,495 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T08:47:50,496 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T08:47:50,503 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T08:47:50,503 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T08:47:50,504 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T08:47:50,511 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T08:47:50,512 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T08:47:50,519 DEBUG 
[master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T08:47:50,522 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T08:47:50,528 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T08:47:50,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:50,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:47:50,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,538 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3ab37fa97a98,43785,1732006070171, sessionid=0x1015390da300000, setting cluster-up flag (Was=false) 2024-11-19T08:47:50,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,578 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T08:47:50,579 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:50,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:50,620 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T08:47:50,621 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:50,622 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T08:47:50,624 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:50,624 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T08:47:50,624 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T08:47:50,625 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3ab37fa97a98,43785,1732006070171 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T08:47:50,626 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:50,627 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:50,627 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:50,627 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:47:50,627 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3ab37fa97a98:0, corePoolSize=10, maxPoolSize=10 2024-11-19T08:47:50,627 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,627 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:47:50,627 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T08:47:50,628 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(746): ClusterId : 40f1d1fa-8ae1-4908-91e3-f5ec2faec1fd 2024-11-19T08:47:50,628 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732006100628 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T08:47:50,629 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,629 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:50,629 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T08:47:50,630 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T08:47:50,630 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T08:47:50,630 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T08:47:50,630 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T08:47:50,630 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T08:47:50,631 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,631 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006070631,5,FailOnTimeoutGroup] 2024-11-19T08:47:50,631 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006070631,5,FailOnTimeoutGroup] 2024-11-19T08:47:50,631 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,631 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T08:47:50,631 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,631 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T08:47:50,631 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
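The hbase:meta descriptor written by FSTableDescriptors above (four families plus the MultiRowMutationEndpoint coprocessor entry) can be read back from a running cluster through the Admin API. A small sketch, assuming a reachable ZooKeeper quorum (the one hard-coded here stands in for the mini cluster's 127.0.0.1:54066 and is an assumption, not taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class PrintMetaDescriptor {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed quorum address; adjust to the cluster under inspection.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Prints the hbase:meta table descriptor, including its column families
          // and the coprocessor attribute seen in the log line above.
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          System.out.println(meta);
        }
      }
    }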
2024-11-19T08:47:50,637 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:47:50,637 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:47:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:47:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:47:50,645 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:47:50,646 DEBUG [RS:0;3ab37fa97a98:38843 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7163ccd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:47:50,647 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T08:47:50,647 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28 2024-11-19T08:47:50,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:47:50,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:47:50,658 
DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:50,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:47:50,660 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:47:50,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,661 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3ab37fa97a98:38843 2024-11-19T08:47:50,661 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:47:50,661 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:47:50,661 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(832): About to register with Master. 
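The reportForDuty / "Registering regionserver" exchange above is what makes 3ab37fa97a98,38843,1732006070302 visible as a live server to the rest of the cluster. A brief sketch of how a client could observe the result through cluster metrics; the configuration lookup is assumed to find an hbase-site.xml on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Once the registration above has completed, the server name
          // host,port,startcode appears in this map.
          for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
            System.out.println(sn.getServerName());
          }
        }
      }
    }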
2024-11-19T08:47:50,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:50,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:47:50,661 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,43785,1732006070171 with port=38843, startcode=1732006070302 2024-11-19T08:47:50,662 DEBUG [RS:0;3ab37fa97a98:38843 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:47:50,663 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:47:50,663 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,664 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45063, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:47:50,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:50,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:47:50,664 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43785 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:50,664 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43785 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:50,666 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28 2024-11-19T08:47:50,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:47:50,666 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40745 2024-11-19T08:47:50,666 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:47:50,666 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:50,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:47:50,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:47:50,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:50,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:50,669 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:47:50,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740 2024-11-19T08:47:50,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740 2024-11-19T08:47:50,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:47:50,671 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:47:50,672 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:47:50,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:47:50,678 DEBUG [RS:0;3ab37fa97a98:38843 {}] zookeeper.ZKUtil(111): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:50,679 WARN [RS:0;3ab37fa97a98:38843 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T08:47:50,679 INFO [RS:0;3ab37fa97a98:38843 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:47:50,679 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:50,683 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:47:50,683 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,38843,1732006070302] 2024-11-19T08:47:50,686 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:47:50,686 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884375, jitterRate=0.12454186379909515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:47:50,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732006070658Initializing all the Stores at 1732006070659 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006070659Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006070659Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '65536 B (64KB)'} at 1732006070659Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006070659Cleaning up temporary data from old regions at 1732006070671 (+12 ms)Region opened successfully at 1732006070687 (+16 ms) 2024-11-19T08:47:50,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:47:50,687 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:47:50,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:47:50,687 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:47:50,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:47:50,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:47:50,687 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:47:50,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006070687Disabling compacts and flushes for region at 1732006070687Disabling writes for close at 1732006070687Writing region close event to WAL at 1732006070687Closed at 1732006070687 2024-11-19T08:47:50,689 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:50,689 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T08:47:50,689 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T08:47:50,689 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:47:50,690 INFO [RS:0;3ab37fa97a98:38843 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:47:50,690 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
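In the split policy printed when region 1588230740 opened above, desiredMaxFileSize=884375 is the configured maximum region file size with the logged jitterRate applied on top (roughly base + base * jitterRate in ConstantSizeRegionSplitPolicy). Working backwards, the base would be about 786432 bytes (768 KB); that base value is inferred from the arithmetic, not printed anywhere in this log. A tiny check under that assumption:

    public class SplitJitterCheck {
      public static void main(String[] args) {
        // Inferred configured max file size for this test run (assumption): 768 KB.
        long configuredMaxFileSize = 786_432L;
        double jitterRate = 0.12454186379909515;  // value logged for region 1588230740
        long desiredMaxFileSize =
            configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
        // Prints 884375, matching the desiredMaxFileSize in the log line above.
        System.out.println(desiredMaxFileSize);
      }
    }

The same arithmetic fits the earlier master:store line (desiredMaxFileSize=727822 at jitterRate=-0.0745), which is consistent with a single configured base size for the test.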
2024-11-19T08:47:50,690 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:47:50,690 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:47:50,692 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T08:47:50,693 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:47:50,693 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor 
service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:47:50,693 DEBUG [RS:0;3ab37fa97a98:38843 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:47:50,695 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,695 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,695 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,695 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,695 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,695 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38843,1732006070302-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:47:50,718 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:47:50,718 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38843,1732006070302-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,718 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:50,718 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.Replication(171): 3ab37fa97a98,38843,1732006070302 started 2024-11-19T08:47:50,740 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
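The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with the usual defaults of hbase.regionserver.global.memstore.size = 0.4 and hbase.regionserver.global.memstore.size.lower.limit = 0.95; the implied heap of roughly 2.2 GB is an inference, not something this log states, and the test configuration may override either fraction. A sketch of the arithmetic under those assumptions:

    public class MemStoreLimits {
      public static void main(String[] args) {
        // Assumed: 0.4 of heap for the global memstore limit, 0.95 of that for
        // the low-water mark, heap of ~2.2 GB (all inferred, none logged).
        double heapMb = 2200.0;
        double globalLimitMb = heapMb * 0.4;      // ~880 M, as logged
        double lowMarkMb = globalLimitMb * 0.95;  // ~836 M, as logged
        System.out.printf("limit=%.0fM lowMark=%.0fM%n", globalLimitMb, lowMarkMb);
      }
    }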
2024-11-19T08:47:50,740 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,38843,1732006070302, RpcServer on 3ab37fa97a98/172.17.0.2:38843, sessionid=0x1015390da300001 2024-11-19T08:47:50,741 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:47:50,741 DEBUG [RS:0;3ab37fa97a98:38843 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:50,741 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,38843,1732006070302' 2024-11-19T08:47:50,741 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:47:50,742 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:47:50,744 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:47:50,744 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:47:50,744 DEBUG [RS:0;3ab37fa97a98:38843 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:50,744 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,38843,1732006070302' 2024-11-19T08:47:50,744 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:47:50,744 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:47:50,745 DEBUG [RS:0;3ab37fa97a98:38843 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:47:50,745 INFO [RS:0;3ab37fa97a98:38843 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:47:50,745 INFO [RS:0;3ab37fa97a98:38843 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T08:47:50,842 WARN [3ab37fa97a98:43785 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
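The WAL prefixes logged above (for example 3ab37fa97a98%2C43785%2C1732006070171) are the server name host,port,startcode with the commas URL-encoded, and the number after the trailing dot is the timestamp of the WAL roll. A one-liner to decode such a prefix, using only the JDK:

    import java.net.URLDecoder;
    import java.nio.charset.StandardCharsets;

    public class DecodeWalPrefix {
      public static void main(String[] args) throws Exception {
        String prefix = "3ab37fa97a98%2C43785%2C1732006070171";
        // Prints "3ab37fa97a98,43785,1732006070171": host, RPC port, start code,
        // i.e. the master's server name as it appears elsewhere in this log.
        System.out.println(URLDecoder.decode(prefix, StandardCharsets.UTF_8.name()));
      }
    }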
2024-11-19T08:47:50,847 INFO [RS:0;3ab37fa97a98:38843 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C38843%2C1732006070302, suffix=, logDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302, archiveDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/oldWALs, maxLogs=32 2024-11-19T08:47:50,847 INFO [RS:0;3ab37fa97a98:38843 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C38843%2C1732006070302.1732006070847 2024-11-19T08:47:50,856 INFO [RS:0;3ab37fa97a98:38843 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006070847 2024-11-19T08:47:50,859 DEBUG [RS:0;3ab37fa97a98:38843 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42747:42747),(127.0.0.1/127.0.0.1:41431:41431)] 2024-11-19T08:47:51,092 DEBUG [3ab37fa97a98:43785 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T08:47:51,093 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:51,094 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,38843,1732006070302, state=OPENING 2024-11-19T08:47:51,126 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T08:47:51,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:51,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:47:51,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:51,185 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:47:51,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,38843,1732006070302}] 2024-11-19T08:47:51,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:47:51,187 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:51,187 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:51,338 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T08:47:51,340 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39929, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T08:47:51,344 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T08:47:51,344 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:47:51,346 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C38843%2C1732006070302.meta, suffix=.meta, logDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302, archiveDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/oldWALs, maxLogs=32 2024-11-19T08:47:51,347 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C38843%2C1732006070302.meta.1732006071346.meta 2024-11-19T08:47:51,356 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.meta.1732006071346.meta 2024-11-19T08:47:51,367 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42747:42747),(127.0.0.1/127.0.0.1:41431:41431)] 2024-11-19T08:47:51,375 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:47:51,375 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T08:47:51,375 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T08:47:51,376 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T08:47:51,376 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T08:47:51,376 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:51,376 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T08:47:51,376 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T08:47:51,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:47:51,380 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:47:51,380 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:51,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:51,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family ns of region 1588230740 2024-11-19T08:47:51,381 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:47:51,381 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:51,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:51,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:47:51,383 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:47:51,383 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:51,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:51,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:47:51,384 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:47:51,385 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:51,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:47:51,385 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:47:51,386 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740 2024-11-19T08:47:51,387 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740 2024-11-19T08:47:51,388 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:47:51,388 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:47:51,388 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-19T08:47:51,390 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:47:51,391 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826300, jitterRate=0.05069524049758911}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:47:51,391 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T08:47:51,392 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732006071376Writing region info on filesystem at 1732006071376Initializing all the Stores at 1732006071377 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006071377Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006071378 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006071378Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006071378Cleaning up temporary data from old regions at 1732006071388 (+10 ms)Running coprocessor post-open hooks at 1732006071391 (+3 ms)Region opened successfully at 1732006071392 (+1 ms) 2024-11-19T08:47:51,393 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732006071338 2024-11-19T08:47:51,396 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T08:47:51,396 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T08:47:51,397 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:51,399 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,38843,1732006070302, state=OPEN 2024-11-19T08:47:51,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:47:51,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:47:51,433 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:51,433 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:51,433 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:47:51,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T08:47:51,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,38843,1732006070302 in 248 msec 2024-11-19T08:47:51,440 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T08:47:51,440 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 747 msec 2024-11-19T08:47:51,441 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:47:51,441 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T08:47:51,442 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:47:51,442 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,38843,1732006070302, seqNum=-1] 2024-11-19T08:47:51,443 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:47:51,445 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36219, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:47:51,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 827 msec 2024-11-19T08:47:51,452 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732006071452, completionTime=-1 2024-11-19T08:47:51,452 INFO 
[master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T08:47:51,452 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T08:47:51,454 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T08:47:51,454 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732006131454 2024-11-19T08:47:51,454 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732006191454 2024-11-19T08:47:51,455 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T08:47:51,455 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43785,1732006070171-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:51,455 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43785,1732006070171-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:51,455 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43785,1732006070171-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:51,455 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3ab37fa97a98:43785, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:51,455 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:51,456 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:51,458 DEBUG [master/3ab37fa97a98:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.091sec 2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43785,1732006070171-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:47:51,461 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43785,1732006070171-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T08:47:51,464 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T08:47:51,464 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T08:47:51,464 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43785,1732006070171-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:47:51,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@262669c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:47:51,529 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3ab37fa97a98,43785,-1 for getting cluster id 2024-11-19T08:47:51,529 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T08:47:51,531 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '40f1d1fa-8ae1-4908-91e3-f5ec2faec1fd' 2024-11-19T08:47:51,532 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T08:47:51,532 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "40f1d1fa-8ae1-4908-91e3-f5ec2faec1fd" 2024-11-19T08:47:51,532 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b8889fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:47:51,532 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3ab37fa97a98,43785,-1] 2024-11-19T08:47:51,533 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T08:47:51,533 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:47:51,534 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56040, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T08:47:51,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d60c13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:47:51,536 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:47:51,537 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,38843,1732006070302, seqNum=-1] 2024-11-19T08:47:51,537 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:47:51,538 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46898, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:47:51,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:51,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:47:51,543 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T08:47:51,544 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T08:47:51,545 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 3ab37fa97a98,43785,1732006070171 2024-11-19T08:47:51,545 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@ac02d0a 2024-11-19T08:47:51,545 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T08:47:51,546 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T08:47:51,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T08:47:51,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
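Note: the two TableDescriptorChecker warnings above refer to the "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192) properties quoted in the messages themselves. Purely as an illustrative sketch (this log does not show the test's own setup code, and the class and method names below are hypothetical), values of that size would typically be forced through the Hadoop Configuration before the minicluster is started:

import org.apache.hadoop.conf.Configuration;

public class TinyRegionConfigSketch {
    // Hypothetical helper: force very small region and memstore sizes so that
    // splits and flushes happen quickly during a log-rolling test. The property
    // names and the 786432 / 8192 values are the ones reported by
    // TableDescriptorChecker in the warnings above.
    public static Configuration tinyRegionConf() {
        Configuration conf = new Configuration();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // triggers the MAX_FILESIZE warning
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // triggers the MEMSTORE_FLUSHSIZE warning
        return conf;
    }
}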
2024-11-19T08:47:51,548 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:47:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:47:51,551 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T08:47:51,551 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:51,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-19T08:47:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:47:51,553 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T08:47:51,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741835_1011 (size=405) 2024-11-19T08:47:51,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741835_1011 (size=405) 2024-11-19T08:47:51,565 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5a39e5dcd976b20da6abec304fcf8846, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28 2024-11-19T08:47:51,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741836_1012 (size=88) 2024-11-19T08:47:51,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43439 is added to blk_1073741836_1012 (size=88) 2024-11-19T08:47:51,574 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:51,575 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5a39e5dcd976b20da6abec304fcf8846, disabling compactions & flushes 2024-11-19T08:47:51,575 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:47:51,575 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:47:51,575 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. after waiting 0 ms 2024-11-19T08:47:51,575 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:47:51,575 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:47:51,575 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5a39e5dcd976b20da6abec304fcf8846: Waiting for close lock at 1732006071575Disabling compacts and flushes for region at 1732006071575Disabling writes for close at 1732006071575Writing region close event to WAL at 1732006071575Closed at 1732006071575 2024-11-19T08:47:51,577 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T08:47:51,577 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732006071577"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732006071577"}]},"ts":"1732006071577"} 2024-11-19T08:47:51,580 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
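Note: the create request logged above declares a single 'info' column family with VERSIONS => '1' and otherwise default settings for table TestLogRolling-testCompactionRecordDoesntBlockRolling. As an illustration only of what such a request looks like from the client side (the test's actual client code is not part of this log; the admin parameter and class name below are assumptions), an equivalent call with the standard HBase Admin API could be sketched as:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
    // Hypothetical sketch: build a descriptor matching the one in the log
    // (one 'info' family, max 1 version) and submit it to the master, which
    // then runs a CreateTableProcedure like pid=4 above.
    static void createTestTable(Admin admin) throws IOException {
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build());
        admin.createTable(table.build());
    }
}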
2024-11-19T08:47:51,581 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T08:47:51,582 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732006071582"}]},"ts":"1732006071582"} 2024-11-19T08:47:51,584 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-19T08:47:51,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5a39e5dcd976b20da6abec304fcf8846, ASSIGN}] 2024-11-19T08:47:51,587 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5a39e5dcd976b20da6abec304fcf8846, ASSIGN 2024-11-19T08:47:51,588 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5a39e5dcd976b20da6abec304fcf8846, ASSIGN; state=OFFLINE, location=3ab37fa97a98,38843,1732006070302; forceNewPlan=false, retain=false 2024-11-19T08:47:51,739 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5a39e5dcd976b20da6abec304fcf8846, regionState=OPENING, regionLocation=3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:51,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5a39e5dcd976b20da6abec304fcf8846, ASSIGN because future has completed 2024-11-19T08:47:51,742 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a39e5dcd976b20da6abec304fcf8846, server=3ab37fa97a98,38843,1732006070302}] 2024-11-19T08:47:51,899 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 
2024-11-19T08:47:51,899 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5a39e5dcd976b20da6abec304fcf8846, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:47:51,900 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,900 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:47:51,900 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,900 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,901 INFO [StoreOpener-5a39e5dcd976b20da6abec304fcf8846-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,903 INFO [StoreOpener-5a39e5dcd976b20da6abec304fcf8846-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a39e5dcd976b20da6abec304fcf8846 columnFamilyName info 2024-11-19T08:47:51,903 DEBUG [StoreOpener-5a39e5dcd976b20da6abec304fcf8846-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:47:51,904 INFO [StoreOpener-5a39e5dcd976b20da6abec304fcf8846-1 {}] regionserver.HStore(327): Store=5a39e5dcd976b20da6abec304fcf8846/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:47:51,904 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,904 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,905 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,905 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,905 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,907 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,909 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:47:51,910 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5a39e5dcd976b20da6abec304fcf8846; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811364, jitterRate=0.03170385956764221}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T08:47:51,910 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:47:51,911 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5a39e5dcd976b20da6abec304fcf8846: Running coprocessor pre-open hook at 1732006071900Writing region info on filesystem at 1732006071900Initializing all the Stores at 1732006071901 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006071901Cleaning up temporary data from old regions at 1732006071905 (+4 ms)Running coprocessor post-open hooks at 1732006071910 (+5 ms)Region opened successfully at 1732006071911 (+1 ms) 2024-11-19T08:47:51,912 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846., pid=6, masterSystemTime=1732006071895 2024-11-19T08:47:51,914 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:47:51,914 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:47:51,915 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5a39e5dcd976b20da6abec304fcf8846, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,38843,1732006070302 2024-11-19T08:47:51,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a39e5dcd976b20da6abec304fcf8846, server=3ab37fa97a98,38843,1732006070302 because future has completed 2024-11-19T08:47:51,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T08:47:51,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5a39e5dcd976b20da6abec304fcf8846, server=3ab37fa97a98,38843,1732006070302 in 182 msec 2024-11-19T08:47:51,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T08:47:51,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5a39e5dcd976b20da6abec304fcf8846, ASSIGN in 343 msec 2024-11-19T08:47:51,933 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T08:47:51,933 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732006071933"}]},"ts":"1732006071933"} 2024-11-19T08:47:51,936 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-19T08:47:51,938 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T08:47:51,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 390 msec 2024-11-19T08:47:52,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:52,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:52,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:47:52,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T08:47:52,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:47:52,493 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T08:47:52,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:47:52,493 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T08:47:53,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:53,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:54,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:54,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:55,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:55,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:56,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:56,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:56,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,910 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:47:56,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:47:56,951 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T08:47:56,951 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-19T08:47:57,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:57,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:58,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:58,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:59,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:47:59,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:00,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:00,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:01,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:01,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:01,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:48:01,582 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T08:48:01,582 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-19T08:48:01,586 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:48:01,586 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:48:01,590 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846., hostname=3ab37fa97a98,38843,1732006070302, seqNum=2] 2024-11-19T08:48:01,597 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:48:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:48:01,603 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T08:48:01,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T08:48:01,605 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T08:48:01,606 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T08:48:01,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38843 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-19T08:48:01,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 
2024-11-19T08:48:01,771 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5a39e5dcd976b20da6abec304fcf8846 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T08:48:01,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/4fda2438a4aa4d16b0a2feb44782beb0 is 1080, key is row0001/info:/1732006081591/Put/seqid=0 2024-11-19T08:48:01,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741837_1013 (size=6033) 2024-11-19T08:48:01,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741837_1013 (size=6033) 2024-11-19T08:48:02,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:02,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:02,197 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/4fda2438a4aa4d16b0a2feb44782beb0 2024-11-19T08:48:02,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/4fda2438a4aa4d16b0a2feb44782beb0 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/4fda2438a4aa4d16b0a2feb44782beb0 2024-11-19T08:48:02,210 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/4fda2438a4aa4d16b0a2feb44782beb0, entries=1, sequenceid=5, filesize=5.9 K 2024-11-19T08:48:02,211 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5a39e5dcd976b20da6abec304fcf8846 in 440ms, sequenceid=5, compaction requested=false 2024-11-19T08:48:02,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5a39e5dcd976b20da6abec304fcf8846: 2024-11-19T08:48:02,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 
2024-11-19T08:48:02,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-19T08:48:02,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-19T08:48:02,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T08:48:02,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 610 msec 2024-11-19T08:48:02,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 621 msec 2024-11-19T08:48:03,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:03,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:04,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:04,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:05,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:05,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:06,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:06,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:07,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:07,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:08,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:08,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:09,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:09,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:10,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:10,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:11,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:11,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T08:48:11,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-19T08:48:11,622 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T08:48:11,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T08:48:11,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T08:48:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-19T08:48:11,627 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-19T08:48:11,628 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T08:48:11,629 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T08:48:11,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38843 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-19T08:48:11,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.
2024-11-19T08:48:11,783 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 5a39e5dcd976b20da6abec304fcf8846 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-19T08:48:11,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/f7fc8e3eedac4be5a46e424cc5479a17 is 1080, key is row0002/info:/1732006091623/Put/seqid=0
2024-11-19T08:48:11,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741838_1014 (size=6033)
2024-11-19T08:48:11,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741838_1014 (size=6033)
2024-11-19T08:48:11,795 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/f7fc8e3eedac4be5a46e424cc5479a17
2024-11-19T08:48:11,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/f7fc8e3eedac4be5a46e424cc5479a17 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/f7fc8e3eedac4be5a46e424cc5479a17
2024-11-19T08:48:11,811 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/f7fc8e3eedac4be5a46e424cc5479a17, entries=1, sequenceid=9, filesize=5.9 K
2024-11-19T08:48:11,812 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5a39e5dcd976b20da6abec304fcf8846 in 30ms, sequenceid=9, compaction requested=false
2024-11-19T08:48:11,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 5a39e5dcd976b20da6abec304fcf8846:
2024-11-19T08:48:11,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.
2024-11-19T08:48:11,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-19T08:48:11,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-19T08:48:11,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-19T08:48:11,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-19T08:48:11,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec 2024-11-19T08:48:12,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:12,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:13,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:13,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:14,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:14,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:14,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T08:48:14,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 after 68062ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T08:48:15,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:15,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:16,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:16,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:17,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:17,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:18,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:18,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:19,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:19,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:20,118 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T08:48:20,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:20,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:21,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:21,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-19T08:48:21,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-19T08:48:21,692 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T08:48:21,696 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C38843%2C1732006070302.1732006101696
2024-11-19T08:48:21,703 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:21,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:21,703 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:21,703 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:21,703 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:21,703 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006070847 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006101696
2024-11-19T08:48:21,704 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42747:42747),(127.0.0.1/127.0.0.1:41431:41431)]
2024-11-19T08:48:21,705 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006070847 is not closed yet, will try archiving it next time
2024-11-19T08:48:21,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T08:48:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741833_1009 (size=5546)
2024-11-19T08:48:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741833_1009 (size=5546)
2024-11-19T08:48:21,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T08:48:21,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-19T08:48:21,709 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-19T08:48:21,710 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T08:48:21,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T08:48:21,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38843 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-19T08:48:21,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.
2024-11-19T08:48:21,864 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 5a39e5dcd976b20da6abec304fcf8846 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-19T08:48:21,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/3cac5fdede594e31bb907d10d79fc7d7 is 1080, key is row0003/info:/1732006101694/Put/seqid=0
2024-11-19T08:48:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741840_1016 (size=6033)
2024-11-19T08:48:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741840_1016 (size=6033)
2024-11-19T08:48:21,874 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/3cac5fdede594e31bb907d10d79fc7d7
2024-11-19T08:48:21,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/3cac5fdede594e31bb907d10d79fc7d7 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/3cac5fdede594e31bb907d10d79fc7d7
2024-11-19T08:48:21,887 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/3cac5fdede594e31bb907d10d79fc7d7, entries=1, sequenceid=13, filesize=5.9 K
2024-11-19T08:48:21,888 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5a39e5dcd976b20da6abec304fcf8846 in 24ms, sequenceid=13, compaction requested=true
2024-11-19T08:48:21,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 5a39e5dcd976b20da6abec304fcf8846:
2024-11-19T08:48:21,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.
2024-11-19T08:48:21,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-19T08:48:21,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-19T08:48:21,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-19T08:48:21,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-11-19T08:48:21,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec
2024-11-19T08:48:22,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:22,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:23,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
... 11 more [identical stack traces elided: the same util.RecoverLeaseFSUtils(258) "Failed invocation" warning, with the same java.lang.reflect.InvocationTargetException caused by java.io.IOException: Filesystem closed, is logged once per second for both WAL files (3ab37fa97a98%2C36037%2C1732005987998.1732005988237 and 3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta) from 2024-11-19T08:48:23,206 through 2024-11-19T08:48:31,212] ...
11 more 2024-11-19T08:48:31,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-19T08:48:31,811 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T08:48:31,812 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:48:31,813 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:48:31,814 DEBUG [Time-limited test {}] regionserver.HStore(1541): 5a39e5dcd976b20da6abec304fcf8846/info is initiating minor compaction (all files) 2024-11-19T08:48:31,814 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:48:31,814 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:31,814 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 5a39e5dcd976b20da6abec304fcf8846/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:48:31,814 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/4fda2438a4aa4d16b0a2feb44782beb0, hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/f7fc8e3eedac4be5a46e424cc5479a17, hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/3cac5fdede594e31bb907d10d79fc7d7] into tmpdir=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp, totalSize=17.7 K 2024-11-19T08:48:31,815 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4fda2438a4aa4d16b0a2feb44782beb0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732006081591 2024-11-19T08:48:31,815 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f7fc8e3eedac4be5a46e424cc5479a17, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732006091623 2024-11-19T08:48:31,816 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3cac5fdede594e31bb907d10d79fc7d7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732006101694 2024-11-19T08:48:31,827 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 5a39e5dcd976b20da6abec304fcf8846#info#compaction#44 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:48:31,828 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/57b0b347d26c4e999b37c4b8fb3118d8 is 1080, key is row0001/info:/1732006081591/Put/seqid=0 2024-11-19T08:48:31,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741841_1017 (size=8296) 2024-11-19T08:48:31,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741841_1017 (size=8296) 2024-11-19T08:48:31,840 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/57b0b347d26c4e999b37c4b8fb3118d8 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/57b0b347d26c4e999b37c4b8fb3118d8 2024-11-19T08:48:31,848 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5a39e5dcd976b20da6abec304fcf8846/info of 5a39e5dcd976b20da6abec304fcf8846 into 57b0b347d26c4e999b37c4b8fb3118d8(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:48:31,848 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 5a39e5dcd976b20da6abec304fcf8846: 2024-11-19T08:48:31,851 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C38843%2C1732006070302.1732006111851 2024-11-19T08:48:31,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:31,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:31,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:31,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:31,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:31,862 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006101696 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006111851 2024-11-19T08:48:31,863 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:42747:42747)] 2024-11-19T08:48:31,863 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006101696 is not closed yet, will try archiving it next time 2024-11-19T08:48:31,863 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006070847 to hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/oldWALs/3ab37fa97a98%2C38843%2C1732006070302.1732006070847 2024-11-19T08:48:31,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741839_1015 (size=2520) 2024-11-19T08:48:31,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741839_1015 (size=2520) 2024-11-19T08:48:31,864 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:48:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:48:31,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T08:48:31,867 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T08:48:31,868 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T08:48:31,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T08:48:31,957 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T08:48:31,957 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T08:48:32,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38843 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-19T08:48:32,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 
2024-11-19T08:48:32,021 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 5a39e5dcd976b20da6abec304fcf8846 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T08:48:32,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/d6274314a0794cd7b85d116f311512a6 is 1080, key is row0000/info:/1732006111849/Put/seqid=0 2024-11-19T08:48:32,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741843_1019 (size=6033) 2024-11-19T08:48:32,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741843_1019 (size=6033) 2024-11-19T08:48:32,031 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/d6274314a0794cd7b85d116f311512a6 2024-11-19T08:48:32,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/d6274314a0794cd7b85d116f311512a6 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/d6274314a0794cd7b85d116f311512a6 2024-11-19T08:48:32,045 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/d6274314a0794cd7b85d116f311512a6, entries=1, sequenceid=18, filesize=5.9 K 2024-11-19T08:48:32,046 INFO [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5a39e5dcd976b20da6abec304fcf8846 in 25ms, sequenceid=18, compaction requested=false 2024-11-19T08:48:32,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5a39e5dcd976b20da6abec304fcf8846: 2024-11-19T08:48:32,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 
2024-11-19T08:48:32,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-19T08:48:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-19T08:48:32,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-19T08:48:32,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-19T08:48:32,053 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec 2024-11-19T08:48:32,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more [identical stack traces elided: the same util.RecoverLeaseFSUtils(258) "Failed invocation" warning, with the same InvocationTargetException caused by java.io.IOException: Filesystem closed, recurs for the .meta WAL at 2024-11-19T08:48:32,212 and for both WAL files at 2024-11-19T08:48:33,214] ...
11 more 2024-11-19T08:48:34,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:34,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:35,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:35,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:36,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:36,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:36,900 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5a39e5dcd976b20da6abec304fcf8846, had cached 0 bytes from a total of 14329 2024-11-19T08:48:37,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:37,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:38,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:38,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:39,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:39,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:40,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:40,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:41,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:41,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T08:48:41,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43785 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-19T08:48:41,952 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T08:48:41,955 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C38843%2C1732006070302.1732006121955
2024-11-19T08:48:41,970 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:41,970 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:41,970 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:41,970 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:41,970 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:48:41,970 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006111851 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006121955
2024-11-19T08:48:41,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741842_1018 (size=2026)
2024-11-19T08:48:41,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741842_1018 (size=2026)
2024-11-19T08:48:41,972 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/WALs/3ab37fa97a98,38843,1732006070302/3ab37fa97a98%2C38843%2C1732006070302.1732006101696 to hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/oldWALs/3ab37fa97a98%2C38843%2C1732006070302.1732006101696
2024-11-19T08:48:41,983 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42747:42747),(127.0.0.1/127.0.0.1:41431:41431)]
2024-11-19T08:48:41,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-19T08:48:41,983 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
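The repeated RecoverLeaseFSUtils warnings above show a once-per-second retry loop that probes DistributedFileSystem.isFileClosed reflectively and keeps hitting an InvocationTargetException wrapping "java.io.IOException: Filesystem closed" because the DFSClient behind those WAL paths has already been shut down. A minimal sketch of that pattern follows; the class name, method name, and timeout handling are assumptions for illustration only and do not reproduce the actual HBase helper.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: probe isFileClosed() reflectively and retry once per
// second, mirroring the cadence of the warnings above.
public final class LeaseProbeSketch {
  public static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    Method isFileClosed;
    try {
      // DistributedFileSystem#isFileClosed(Path) is looked up reflectively so the
      // code still works against FileSystem implementations that lack the method.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // cannot probe; caller must fall back to other checks
    }
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (IllegalAccessException | InvocationTargetException e) {
        // An InvocationTargetException wrapping "Filesystem closed" (as in the log)
        // means the DFSClient is gone, so each retry is doomed to fail the same way.
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}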
2024-11-19T08:48:41,983 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:48:41,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:48:41,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:48:41,983 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
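The call stack above originates in AbstractTestLogRolling.tearDown, which drives HBaseTestingUtil.shutdownMiniCluster and, through it, the AsyncConnectionImpl close seen here. A hedged sketch of such a teardown is below; the class name is hypothetical and the no-arg HBaseTestingUtil constructor is an assumption, not code taken from the test.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Sketch of a JUnit teardown like the one in the call stack above.
public class TearDownSketch {
  // Assumed: the suite holds a single shared testing utility instance.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // shutdownMiniCluster() is what produces the "Shutting down minicluster"
    // and connection-close entries logged above.
    TEST_UTIL.shutdownMiniCluster();
  }
}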
2024-11-19T08:48:41,983 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T08:48:41,983 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1670006093, stopped=false 2024-11-19T08:48:41,984 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3ab37fa97a98,43785,1732006070171 2024-11-19T08:48:42,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:48:42,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:42,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:48:42,036 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:48:42,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:42,036 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T08:48:42,036 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:48:42,036 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:48:42,037 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,38843,1732006070302' ***** 2024-11-19T08:48:42,037 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:48:42,037 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:48:42,037 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:48:42,037 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:48:42,037 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:48:42,037 INFO [RS:0;3ab37fa97a98:38843 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T08:48:42,037 INFO [RS:0;3ab37fa97a98:38843 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:48:42,037 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(3091): Received CLOSE for 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:48:42,037 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,38843,1732006070302 2024-11-19T08:48:42,037 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:48:42,038 INFO [RS:0;3ab37fa97a98:38843 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3ab37fa97a98:38843. 
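The ZKWatcher entries above record the shutdown broadcast: the master deletes /hbase/running, every watcher receives a NodeDeleted event, and each server re-sets a watch on the now-absent znode before stopping. The ZooKeeper-client sketch below shows only that generic pattern, not HBase's ZKWatcher; the quorum string and session timeout are placeholders.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch of watching /hbase/running for deletion.
public class RunningNodeWatchSketch implements Watcher {
  private final ZooKeeper zk;

  public RunningNodeWatchSketch(String quorum) throws Exception {
    this.zk = new ZooKeeper(quorum, 30_000, this);
    zk.exists("/hbase/running", this); // set the initial watch
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeDeleted
        && "/hbase/running".equals(event.getPath())) {
      try {
        // Mirrors "Set watcher on znode that does not yet exist, /hbase/running":
        // re-register the watch, then begin local shutdown.
        zk.exists("/hbase/running", this);
      } catch (KeeperException | InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      // ... trigger the server's stop sequence here ...
    }
  }
}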
2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5a39e5dcd976b20da6abec304fcf8846, disabling compactions & flushes 2024-11-19T08:48:42,038 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:48:42,038 DEBUG [RS:0;3ab37fa97a98:38843 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:48:42,038 DEBUG [RS:0;3ab37fa97a98:38843 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. after waiting 0 ms 2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:48:42,038 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T08:48:42,038 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:48:42,038 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
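The close sequence logged above ("Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled for region") follows a common read/write-lock pattern: writers hold the read half, close waits a bounded time for the write half, and once it owns the lock no new updates are admitted. The sketch below is a generic illustration of that pattern, not HRegion's implementation; all names in it are hypothetical.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Generic close-lock illustration.
public final class CloseLockSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private volatile boolean closed;

  public void write(Runnable mutation) {
    closeLock.readLock().lock();
    try {
      if (closed) {
        throw new IllegalStateException("region is closing");
      }
      mutation.run();
    } finally {
      closeLock.readLock().unlock();
    }
  }

  public boolean close(long timeoutMs) throws InterruptedException {
    // "Time limited wait for close lock": give up if writers do not drain in time.
    if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
      return false;
    }
    try {
      closed = true; // "Updates disabled": flush whatever remains, then tear down
      return true;
    } finally {
      closeLock.writeLock().unlock();
    }
  }
}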
2024-11-19T08:48:42,038 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5a39e5dcd976b20da6abec304fcf8846 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T08:48:42,038 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T08:48:42,038 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T08:48:42,038 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1325): Online Regions={5a39e5dcd976b20da6abec304fcf8846=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T08:48:42,038 DEBUG [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5a39e5dcd976b20da6abec304fcf8846 2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:48:42,038 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:48:42,038 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:48:42,038 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-19T08:48:42,042 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/c39fe184d816447d84f6ea83394c1979 is 1080, key is row0001/info:/1732006121953/Put/seqid=0 2024-11-19T08:48:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741845_1021 (size=6033) 2024-11-19T08:48:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741845_1021 (size=6033) 2024-11-19T08:48:42,047 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/c39fe184d816447d84f6ea83394c1979 2024-11-19T08:48:42,054 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/.tmp/info/c39fe184d816447d84f6ea83394c1979 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/c39fe184d816447d84f6ea83394c1979 2024-11-19T08:48:42,056 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/info/47abfc0895e94531ba363cb7eaafb0e7 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846./info:regioninfo/1732006071915/Put/seqid=0 2024-11-19T08:48:42,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741846_1022 (size=7308) 2024-11-19T08:48:42,061 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/info/47abfc0895e94531ba363cb7eaafb0e7 2024-11-19T08:48:42,061 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/c39fe184d816447d84f6ea83394c1979, entries=1, sequenceid=22, filesize=5.9 K 2024-11-19T08:48:42,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741846_1022 (size=7308) 2024-11-19T08:48:42,062 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5a39e5dcd976b20da6abec304fcf8846 in 24ms, sequenceid=22, compaction requested=true 2024-11-19T08:48:42,062 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/4fda2438a4aa4d16b0a2feb44782beb0, hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/f7fc8e3eedac4be5a46e424cc5479a17, hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/3cac5fdede594e31bb907d10d79fc7d7] to archive 2024-11-19T08:48:42,063 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T08:48:42,066 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/4fda2438a4aa4d16b0a2feb44782beb0 to hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/4fda2438a4aa4d16b0a2feb44782beb0 2024-11-19T08:48:42,067 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/f7fc8e3eedac4be5a46e424cc5479a17 to hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/f7fc8e3eedac4be5a46e424cc5479a17 2024-11-19T08:48:42,069 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/3cac5fdede594e31bb907d10d79fc7d7 to hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/info/3cac5fdede594e31bb907d10d79fc7d7 2024-11-19T08:48:42,069 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3ab37fa97a98:43785 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T08:48:42,070 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4fda2438a4aa4d16b0a2feb44782beb0=6033, f7fc8e3eedac4be5a46e424cc5479a17=6033, 3cac5fdede594e31bb907d10d79fc7d7=6033] 2024-11-19T08:48:42,075 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5a39e5dcd976b20da6abec304fcf8846/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-19T08:48:42,076 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 2024-11-19T08:48:42,076 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5a39e5dcd976b20da6abec304fcf8846: Waiting for close lock at 1732006122038Running coprocessor pre-close hooks at 1732006122038Disabling compacts and flushes for region at 1732006122038Disabling writes for close at 1732006122038Obtaining lock to block concurrent updates at 1732006122038Preparing flush snapshotting stores in 5a39e5dcd976b20da6abec304fcf8846 at 1732006122038Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732006122038Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. at 1732006122039 (+1 ms)Flushing 5a39e5dcd976b20da6abec304fcf8846/info: creating writer at 1732006122039Flushing 5a39e5dcd976b20da6abec304fcf8846/info: appending metadata at 1732006122041 (+2 ms)Flushing 5a39e5dcd976b20da6abec304fcf8846/info: closing flushed file at 1732006122041Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c6654af: reopening flushed file at 1732006122053 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5a39e5dcd976b20da6abec304fcf8846 in 24ms, sequenceid=22, compaction requested=true at 1732006122062 (+9 ms)Writing region close event to WAL at 1732006122070 (+8 ms)Running coprocessor post-close hooks at 1732006122076 (+6 ms)Closed at 1732006122076 2024-11-19T08:48:42,076 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732006071547.5a39e5dcd976b20da6abec304fcf8846. 
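The flush recorded above writes the new store file under the region's .tmp directory and then commits it by renaming it into the column-family directory, so readers only ever observe complete files. Below is a minimal sketch of that write-then-rename pattern against a generic Hadoop FileSystem; the class name, method, and path layout are illustrative assumptions, not HBase's actual HRegionFileSystem API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TwoPhaseFlush {
  private TwoPhaseFlush() {}

  // Phase 1: write the flushed data under .tmp/<family>/; phase 2: commit it
  // by renaming into <family>/ so the file appears atomically to readers.
  public static Path flushAndCommit(Configuration conf, Path regionDir,
      String family, String fileName, byte[] payload) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
    Path committed = new Path(new Path(regionDir, family), fileName);

    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
    }
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + committed);
    }
    return committed;
  }
}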
2024-11-19T08:48:42,088 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/ns/f574e3f6ea0640cb8e1d3f6bc8079adb is 43, key is default/ns:d/1732006071445/Put/seqid=0 2024-11-19T08:48:42,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741847_1023 (size=5153) 2024-11-19T08:48:42,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741847_1023 (size=5153) 2024-11-19T08:48:42,094 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/ns/f574e3f6ea0640cb8e1d3f6bc8079adb 2024-11-19T08:48:42,119 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/table/6d680de0267944f1ad8392bc41b814a0 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732006071933/Put/seqid=0 2024-11-19T08:48:42,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741848_1024 (size=5508) 2024-11-19T08:48:42,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741848_1024 (size=5508) 2024-11-19T08:48:42,125 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/table/6d680de0267944f1ad8392bc41b814a0 2024-11-19T08:48:42,131 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/info/47abfc0895e94531ba363cb7eaafb0e7 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/info/47abfc0895e94531ba363cb7eaafb0e7 2024-11-19T08:48:42,137 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/info/47abfc0895e94531ba363cb7eaafb0e7, entries=10, sequenceid=11, filesize=7.1 K 2024-11-19T08:48:42,138 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/ns/f574e3f6ea0640cb8e1d3f6bc8079adb as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/ns/f574e3f6ea0640cb8e1d3f6bc8079adb 2024-11-19T08:48:42,143 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/ns/f574e3f6ea0640cb8e1d3f6bc8079adb, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T08:48:42,144 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/.tmp/table/6d680de0267944f1ad8392bc41b814a0 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/table/6d680de0267944f1ad8392bc41b814a0 2024-11-19T08:48:42,150 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/table/6d680de0267944f1ad8392bc41b814a0, entries=2, sequenceid=11, filesize=5.4 K 2024-11-19T08:48:42,151 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false 2024-11-19T08:48:42,160 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T08:48:42,161 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:48:42,161 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:48:42,161 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006122038Running coprocessor pre-close hooks at 1732006122038Disabling compacts and flushes for region at 1732006122038Disabling writes for close at 1732006122038Obtaining lock to block concurrent updates at 1732006122038Preparing flush snapshotting stores in 1588230740 at 1732006122038Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732006122039 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732006122039Flushing 1588230740/info: creating writer at 1732006122039Flushing 1588230740/info: appending metadata at 1732006122055 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732006122055Flushing 1588230740/ns: creating writer at 1732006122067 (+12 ms)Flushing 1588230740/ns: appending metadata at 1732006122087 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1732006122087Flushing 1588230740/table: creating writer at 1732006122099 (+12 ms)Flushing 1588230740/table: appending metadata at 1732006122118 (+19 ms)Flushing 1588230740/table: closing flushed file at 1732006122119 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e533d5e: reopening flushed file at 1732006122130 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c898b05: reopening flushed file at 1732006122137 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17477493: reopening flushed file at 1732006122143 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false at 1732006122151 (+8 ms)Writing region close event to WAL at 1732006122157 (+6 ms)Running coprocessor post-close hooks at 1732006122161 (+4 ms)Closed at 1732006122161 2024-11-19T08:48:42,161 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T08:48:42,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:42,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:42,238 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,38843,1732006070302; all regions closed. 
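The two warnings above come from RecoverLeaseFSUtils probing isFileClosed through reflection after the DFSClient has already been shut down, so the probe fails with "Filesystem closed". A small sketch of that reflective probe follows, under the assumption that only some FileSystem implementations (such as DistributedFileSystem) expose an isFileClosed(Path) method; the helper class is illustrative.

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  private IsFileClosedProbe() {}

  // Returns true if the filesystem reports the file as closed; false if the
  // method is absent or the invocation fails (e.g. "Filesystem closed",
  // as in the stack traces above).
  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (ReflectiveOperationException e) {
      return false;
    }
  }
}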
2024-11-19T08:48:42,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,240 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,240 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741834_1010 (size=3306) 2024-11-19T08:48:42,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741834_1010 (size=3306) 2024-11-19T08:48:42,246 DEBUG [RS:0;3ab37fa97a98:38843 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/oldWALs 2024-11-19T08:48:42,246 INFO [RS:0;3ab37fa97a98:38843 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C38843%2C1732006070302.meta:.meta(num 1732006071346) 2024-11-19T08:48:42,247 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,247 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,247 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,247 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,247 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741844_1020 (size=1252) 2024-11-19T08:48:42,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741844_1020 (size=1252) 2024-11-19T08:48:42,253 DEBUG [RS:0;3ab37fa97a98:38843 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/oldWALs 2024-11-19T08:48:42,253 INFO [RS:0;3ab37fa97a98:38843 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C38843%2C1732006070302:(num 1732006121955) 2024-11-19T08:48:42,253 DEBUG [RS:0;3ab37fa97a98:38843 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:48:42,253 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:48:42,253 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:48:42,253 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T08:48:42,253 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:48:42,253 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T08:48:42,253 INFO [RS:0;3ab37fa97a98:38843 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38843 2024-11-19T08:48:42,286 INFO [RS:0;3ab37fa97a98:38843 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:48:42,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:48:42,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,38843,1732006070302 2024-11-19T08:48:42,294 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,38843,1732006070302] 2024-11-19T08:48:42,315 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,38843,1732006070302 already deleted, retry=false 2024-11-19T08:48:42,315 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,38843,1732006070302 expired; onlineServers=0 2024-11-19T08:48:42,315 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3ab37fa97a98,43785,1732006070171' ***** 2024-11-19T08:48:42,315 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T08:48:42,315 INFO [M:0;3ab37fa97a98:43785 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:48:42,315 INFO [M:0;3ab37fa97a98:43785 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:48:42,315 DEBUG [M:0;3ab37fa97a98:43785 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T08:48:42,315 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
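The NodeDeleted event for /hbase/rs/3ab37fa97a98,38843,... above is the regionserver's ephemeral znode vanishing as its ZooKeeper session closes, which RegionServerTracker then processes as an expiration. A hedged, self-contained sketch of that ephemeral-membership pattern with the plain ZooKeeper client; the connect string and znode paths are made up for illustration and are not HBase's layout.

import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public final class EphemeralMembership {
  public static void main(String[] args) throws Exception {
    // Connection string is illustrative.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    try {
      zk.create("/members", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    } catch (KeeperException.NodeExistsException ignored) {
      // parent already exists
    }

    // The ephemeral child disappears when this session ends; watchers on the
    // parent then see NodeChildrenChanged / NodeDeleted, as in the log above.
    zk.create("/members/server-1", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    List<String> members = zk.getChildren("/members",
        event -> System.out.println("ZooKeeper event: " + event.getType() + " " + event.getPath()));
    System.out.println("Current members: " + members);
  }
}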
2024-11-19T08:48:42,315 DEBUG [M:0;3ab37fa97a98:43785 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T08:48:42,315 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006070631 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006070631,5,FailOnTimeoutGroup] 2024-11-19T08:48:42,315 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006070631 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006070631,5,FailOnTimeoutGroup] 2024-11-19T08:48:42,315 INFO [M:0;3ab37fa97a98:43785 {}] hbase.ChoreService(370): Chore service for: master/3ab37fa97a98:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T08:48:42,315 INFO [M:0;3ab37fa97a98:43785 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:48:42,315 DEBUG [M:0;3ab37fa97a98:43785 {}] master.HMaster(1795): Stopping service threads 2024-11-19T08:48:42,315 INFO [M:0;3ab37fa97a98:43785 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T08:48:42,316 INFO [M:0;3ab37fa97a98:43785 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:48:42,316 INFO [M:0;3ab37fa97a98:43785 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T08:48:42,316 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T08:48:42,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T08:48:42,325 DEBUG [M:0;3ab37fa97a98:43785 {}] zookeeper.ZKUtil(347): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T08:48:42,325 WARN [M:0;3ab37fa97a98:43785 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T08:48:42,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:42,325 INFO [M:0;3ab37fa97a98:43785 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/.lastflushedseqids 2024-11-19T08:48:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741849_1025 (size=130) 2024-11-19T08:48:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741849_1025 (size=130) 2024-11-19T08:48:42,331 INFO [M:0;3ab37fa97a98:43785 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T08:48:42,331 INFO [M:0;3ab37fa97a98:43785 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T08:48:42,331 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:48:42,331 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:48:42,331 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:48:42,331 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:48:42,331 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:48:42,331 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-19T08:48:42,352 DEBUG [M:0;3ab37fa97a98:43785 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c10a8543f9964496a57a853ceda2dc28 is 82, key is hbase:meta,,1/info:regioninfo/1732006071397/Put/seqid=0 2024-11-19T08:48:42,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741850_1026 (size=5672) 2024-11-19T08:48:42,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741850_1026 (size=5672) 2024-11-19T08:48:42,356 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c10a8543f9964496a57a853ceda2dc28 2024-11-19T08:48:42,380 DEBUG [M:0;3ab37fa97a98:43785 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4334e789c8e4599808eafb0e94b8fa5 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732006071940/Put/seqid=0 2024-11-19T08:48:42,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741851_1027 (size=7824) 2024-11-19T08:48:42,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741851_1027 (size=7824) 2024-11-19T08:48:42,385 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4334e789c8e4599808eafb0e94b8fa5 2024-11-19T08:48:42,390 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c4334e789c8e4599808eafb0e94b8fa5 2024-11-19T08:48:42,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:48:42,394 INFO 
[RS:0;3ab37fa97a98:38843 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:48:42,394 INFO [RS:0;3ab37fa97a98:38843 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,38843,1732006070302; zookeeper connection closed. 2024-11-19T08:48:42,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38843-0x1015390da300001, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:48:42,395 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1cb20d40 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1cb20d40 2024-11-19T08:48:42,395 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T08:48:42,408 DEBUG [M:0;3ab37fa97a98:43785 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2839fb2b2c1b4b1b83c7072d12689224 is 69, key is 3ab37fa97a98,38843,1732006070302/rs:state/1732006070665/Put/seqid=0 2024-11-19T08:48:42,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741852_1028 (size=5156) 2024-11-19T08:48:42,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741852_1028 (size=5156) 2024-11-19T08:48:42,413 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2839fb2b2c1b4b1b83c7072d12689224 2024-11-19T08:48:42,435 DEBUG [M:0;3ab37fa97a98:43785 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d8c4875d831b4b8da65a47a1faad59c7 is 52, key is load_balancer_on/state:d/1732006071542/Put/seqid=0 2024-11-19T08:48:42,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741853_1029 (size=5056) 2024-11-19T08:48:42,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741853_1029 (size=5056) 2024-11-19T08:48:42,442 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d8c4875d831b4b8da65a47a1faad59c7 2024-11-19T08:48:42,448 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c10a8543f9964496a57a853ceda2dc28 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c10a8543f9964496a57a853ceda2dc28 2024-11-19T08:48:42,454 INFO [M:0;3ab37fa97a98:43785 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c10a8543f9964496a57a853ceda2dc28, entries=8, sequenceid=121, filesize=5.5 K 2024-11-19T08:48:42,455 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c4334e789c8e4599808eafb0e94b8fa5 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c4334e789c8e4599808eafb0e94b8fa5 2024-11-19T08:48:42,461 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c4334e789c8e4599808eafb0e94b8fa5 2024-11-19T08:48:42,461 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c4334e789c8e4599808eafb0e94b8fa5, entries=14, sequenceid=121, filesize=7.6 K 2024-11-19T08:48:42,462 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2839fb2b2c1b4b1b83c7072d12689224 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2839fb2b2c1b4b1b83c7072d12689224 2024-11-19T08:48:42,468 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2839fb2b2c1b4b1b83c7072d12689224, entries=1, sequenceid=121, filesize=5.0 K 2024-11-19T08:48:42,469 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d8c4875d831b4b8da65a47a1faad59c7 as hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d8c4875d831b4b8da65a47a1faad59c7 2024-11-19T08:48:42,475 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40745/user/jenkins/test-data/f1659733-c52b-aba7-9e9e-60138ac8ff28/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d8c4875d831b4b8da65a47a1faad59c7, entries=1, sequenceid=121, filesize=4.9 K 2024-11-19T08:48:42,476 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=121, compaction requested=false 2024-11-19T08:48:42,478 INFO [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T08:48:42,478 DEBUG [M:0;3ab37fa97a98:43785 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006122331Disabling compacts and flushes for region at 1732006122331Disabling writes for close at 1732006122331Obtaining lock to block concurrent updates at 1732006122331Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732006122331Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1732006122332 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732006122332Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732006122332Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732006122351 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732006122351Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732006122361 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732006122379 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732006122379Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732006122390 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732006122407 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732006122407Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732006122418 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732006122435 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732006122435Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2629f3c6: reopening flushed file at 1732006122447 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e9ab053: reopening flushed file at 1732006122454 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39754699: reopening flushed file at 1732006122461 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60e2d3e: reopening flushed file at 1732006122468 (+7 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=121, compaction requested=false at 1732006122476 (+8 ms)Writing region close event to WAL at 1732006122477 (+1 ms)Closed at 1732006122478 (+1 ms) 2024-11-19T08:48:42,478 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,478 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,478 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,478 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,478 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:48:42,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38083 is added to blk_1073741830_1006 (size=53047) 2024-11-19T08:48:42,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741830_1006 (size=53047) 2024-11-19T08:48:42,481 INFO [M:0;3ab37fa97a98:43785 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
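Each "Region close journal" line above records every step with an absolute timestamp and, where a step took measurable time, a "(+N ms)" delta. The sketch below pulls those deltas out of such a line for ad-hoc log analysis; the regular expression is an assumption about the journal format, not an HBase utility.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class CloseJournalTimings {
  // Matches "<step text> at <13-digit millis>" optionally followed by " (+N ms)".
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  private CloseJournalTimings() {}

  // Maps each journal step (in order) to its reported delta in ms, 0 if none.
  // Expects the journal portion of the log line as input.
  public static Map<String, Long> parse(String journal) {
    Map<String, Long> deltas = new LinkedHashMap<>();
    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      long delta = m.group(3) == null ? 0L : Long.parseLong(m.group(3));
      deltas.put(m.group(1).trim(), delta);
    }
    return deltas;
  }
}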
2024-11-19T08:48:42,481 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T08:48:42,481 INFO [M:0;3ab37fa97a98:43785 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43785 2024-11-19T08:48:42,481 INFO [M:0;3ab37fa97a98:43785 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:48:42,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:48:42,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:48:42,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T08:48:42,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T08:48:42,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:48:42,608 INFO [M:0;3ab37fa97a98:43785 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:48:42,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43785-0x1015390da300000, quorum=127.0.0.1:54066, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:48:42,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2fb77c6e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:48:42,656 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57887c0e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:48:42,656 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:48:42,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b127990{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:48:42,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67556f4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir/,STOPPED} 2024-11-19T08:48:42,658 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:48:42,658 WARN [BP-324031568-172.17.0.2-1732006068345 heartbeating to localhost/127.0.0.1:40745 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:48:42,658 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:48:42,658 WARN [BP-324031568-172.17.0.2-1732006068345 heartbeating to localhost/127.0.0.1:40745 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-324031568-172.17.0.2-1732006068345 (Datanode Uuid fb2b8bba-1ef6-4634-b7be-6d14749bffc2) service to localhost/127.0.0.1:40745 2024-11-19T08:48:42,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data3/current/BP-324031568-172.17.0.2-1732006068345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:48:42,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data4/current/BP-324031568-172.17.0.2-1732006068345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:48:42,659 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:48:42,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@454902dc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:48:42,662 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b792597{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:48:42,662 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:48:42,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77bac3c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:48:42,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2725554a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir/,STOPPED} 2024-11-19T08:48:42,664 WARN [BP-324031568-172.17.0.2-1732006068345 heartbeating to localhost/127.0.0.1:40745 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:48:42,664 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:48:42,664 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:48:42,664 WARN [BP-324031568-172.17.0.2-1732006068345 heartbeating to localhost/127.0.0.1:40745 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-324031568-172.17.0.2-1732006068345 (Datanode Uuid eb7a44ac-3990-4446-8cba-d1fc32692a86) service to localhost/127.0.0.1:40745 2024-11-19T08:48:42,664 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data1/current/BP-324031568-172.17.0.2-1732006068345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:48:42,664 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/cluster_816fbdd1-9850-0245-19de-8bfee4fb83e7/data/data2/current/BP-324031568-172.17.0.2-1732006068345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:48:42,665 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:48:42,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4af78b23{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:48:42,672 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45ec22ac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:48:42,672 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:48:42,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3adf1c78{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:48:42,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a03003f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir/,STOPPED} 2024-11-19T08:48:42,679 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T08:48:42,698 INFO [regionserver/3ab37fa97a98:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:48:42,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T08:48:42,709 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40745 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40745 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40745 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:40745 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40745 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=219 (was 242), ProcessCount=11 (was 11), AvailableMemoryMB=5027 (was 5167) 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=219, ProcessCount=11, AvailableMemoryMB=5027 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.log.dir so I do NOT create it in target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0620a00e-4ed3-72d8-637d-b834ce7e180c/hadoop.tmp.dir so I do NOT create it in target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b, deleteOnExit=true 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/test.cache.data in system properties and HBase conf 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T08:48:42,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T08:48:42,718 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T08:48:42,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 
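[editor's note] The entries above show HBaseTestingUtil seeding test directories into system properties before "STARTING DFS" for the next test (testLogRolling), using the option string StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. As a hedged sketch only — the class names are taken from the log, but the builder-style construction and test body below are assumptions, not code from this run — a test typically drives this harness roughly like so:

// Sketch, assuming the HBaseTestingUtil / StartMiniClusterOption API named in the log above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option string logged above: 1 master, 1 region server, 2 data nodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // produces the "Starting up minicluster" / "STARTING DFS" lines
    try {
      // ... test body (hypothetical) ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen earlier in this section
    }
  }
}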
2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/nfs.dump.dir in system properties and HBase conf 2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/java.io.tmpdir in system properties and HBase conf 2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T08:48:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T08:48:42,734 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:48:42,994 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:48:42,999 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:48:43,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:48:43,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:48:43,000 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:48:43,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:48:43,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53e82728{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:48:43,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4419cf95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:48:43,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d6c94d4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/java.io.tmpdir/jetty-localhost-36939-hadoop-hdfs-3_4_1-tests_jar-_-any-12478423203809357939/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:48:43,104 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4dcfcc47{HTTP/1.1, (http/1.1)}{localhost:36939} 2024-11-19T08:48:43,104 INFO [Time-limited test {}] server.Server(415): Started @247632ms 2024-11-19T08:48:43,116 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:48:43,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:43,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:43,320 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:48:43,323 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:48:43,324 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:48:43,324 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:48:43,324 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T08:48:43,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e1d4a93{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:48:43,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@549b308b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:48:43,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d0d4aa4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/java.io.tmpdir/jetty-localhost-46013-hadoop-hdfs-3_4_1-tests_jar-_-any-14363636114203394377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:48:43,425 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cec8e5{HTTP/1.1, (http/1.1)}{localhost:46013} 2024-11-19T08:48:43,425 INFO [Time-limited test {}] server.Server(415): Started @247953ms 2024-11-19T08:48:43,426 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:48:43,454 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:48:43,456 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:48:43,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:48:43,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:48:43,457 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:48:43,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d3adf0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:48:43,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1be3ce1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:48:43,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79aff83f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/java.io.tmpdir/jetty-localhost-33671-hadoop-hdfs-3_4_1-tests_jar-_-any-15221589862487740382/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:48:43,557 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@295546a4{HTTP/1.1, (http/1.1)}{localhost:33671} 2024-11-19T08:48:43,557 INFO [Time-limited test {}] server.Server(415): Started @248085ms 2024-11-19T08:48:43,558 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:48:44,125 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data2/current/BP-115775057-172.17.0.2-1732006122738/current, will proceed with Du for space computation calculation, 2024-11-19T08:48:44,125 WARN [Thread-1963 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data1/current/BP-115775057-172.17.0.2-1732006122738/current, will proceed with Du for space computation calculation, 2024-11-19T08:48:44,141 WARN [Thread-1927 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T08:48:44,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16626fd144e075e4 with lease ID 0x249e612db5ffa542: Processing first storage report for DS-855c0321-7ba7-4cc8-b054-75eec8ea6a28 from datanode DatanodeRegistration(127.0.0.1:38013, datanodeUuid=5e386ada-2388-44c6-99ef-1a5a4fd6b3c4, infoPort=33099, infoSecurePort=0, ipcPort=34467, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738) 2024-11-19T08:48:44,143 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16626fd144e075e4 with lease ID 0x249e612db5ffa542: from storage DS-855c0321-7ba7-4cc8-b054-75eec8ea6a28 node DatanodeRegistration(127.0.0.1:38013, datanodeUuid=5e386ada-2388-44c6-99ef-1a5a4fd6b3c4, infoPort=33099, infoSecurePort=0, ipcPort=34467, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:48:44,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16626fd144e075e4 with lease ID 0x249e612db5ffa542: Processing first storage report for DS-9c24be6f-dcd6-4e6a-998d-c08e04b6f054 from datanode DatanodeRegistration(127.0.0.1:38013, datanodeUuid=5e386ada-2388-44c6-99ef-1a5a4fd6b3c4, infoPort=33099, infoSecurePort=0, ipcPort=34467, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738) 2024-11-19T08:48:44,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16626fd144e075e4 with lease ID 0x249e612db5ffa542: from storage DS-9c24be6f-dcd6-4e6a-998d-c08e04b6f054 node DatanodeRegistration(127.0.0.1:38013, datanodeUuid=5e386ada-2388-44c6-99ef-1a5a4fd6b3c4, infoPort=33099, infoSecurePort=0, ipcPort=34467, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:48:44,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:44,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:44,249 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data3/current/BP-115775057-172.17.0.2-1732006122738/current, will proceed with Du for space computation calculation, 2024-11-19T08:48:44,250 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data4/current/BP-115775057-172.17.0.2-1732006122738/current, will proceed with Du for space computation calculation, 2024-11-19T08:48:44,270 WARN [Thread-1950 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:48:44,272 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4530dd6fc90c179 with lease ID 0x249e612db5ffa543: Processing first storage report for DS-e975266e-6e11-401b-be95-bf98bc1e82bf from datanode DatanodeRegistration(127.0.0.1:34735, datanodeUuid=e2be23fc-6a78-4958-b22e-8ceca0ac6a28, infoPort=37747, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738) 2024-11-19T08:48:44,272 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4530dd6fc90c179 with lease ID 0x249e612db5ffa543: from storage DS-e975266e-6e11-401b-be95-bf98bc1e82bf node DatanodeRegistration(127.0.0.1:34735, datanodeUuid=e2be23fc-6a78-4958-b22e-8ceca0ac6a28, infoPort=37747, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:48:44,272 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4530dd6fc90c179 with lease ID 0x249e612db5ffa543: Processing first storage report for DS-87f075f9-5c29-4093-92f0-bdf7ea917753 from datanode DatanodeRegistration(127.0.0.1:34735, datanodeUuid=e2be23fc-6a78-4958-b22e-8ceca0ac6a28, infoPort=37747, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738) 2024-11-19T08:48:44,272 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4530dd6fc90c179 with lease ID 0x249e612db5ffa543: from storage DS-87f075f9-5c29-4093-92f0-bdf7ea917753 node DatanodeRegistration(127.0.0.1:34735, datanodeUuid=e2be23fc-6a78-4958-b22e-8ceca0ac6a28, infoPort=37747, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=1129159618;c=1732006122738), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:48:44,284 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae 2024-11-19T08:48:44,287 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/zookeeper_0, clientPort=61360, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T08:48:44,288 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61360 2024-11-19T08:48:44,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:44,289 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:44,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:48:44,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:48:44,298 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc with version=8 2024-11-19T08:48:44,298 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase-staging 2024-11-19T08:48:44,301 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:48:44,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:48:44,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:48:44,301 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:48:44,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:48:44,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:48:44,301 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T08:48:44,301 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:48:44,302 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38369 2024-11-19T08:48:44,304 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38369 connecting to ZooKeeper ensemble=127.0.0.1:61360 2024-11-19T08:48:44,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:383690x0, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:48:44,372 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38369-0x1015391adc30000 connected 2024-11-19T08:48:44,438 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:44,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:44,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:48:44,442 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc, hbase.cluster.distributed=false 2024-11-19T08:48:44,444 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:48:44,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38369 2024-11-19T08:48:44,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38369 2024-11-19T08:48:44,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38369 2024-11-19T08:48:44,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38369 2024-11-19T08:48:44,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38369 2024-11-19T08:48:44,465 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:48:44,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:48:44,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:48:44,465 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:48:44,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:48:44,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:48:44,465 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:48:44,465 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:48:44,466 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43395 2024-11-19T08:48:44,468 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43395 connecting to ZooKeeper ensemble=127.0.0.1:61360 2024-11-19T08:48:44,468 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:44,470 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:44,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433950x0, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:48:44,480 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43395-0x1015391adc30001 connected 2024-11-19T08:48:44,480 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:48:44,481 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:48:44,481 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:48:44,482 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T08:48:44,483 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:48:44,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43395 2024-11-19T08:48:44,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43395 2024-11-19T08:48:44,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43395 2024-11-19T08:48:44,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43395 2024-11-19T08:48:44,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43395 
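[editor's note] The RecoverableZooKeeper / ZKWatcher entries above show the master (port 38369) and region server (port 43395) connecting to the MiniZK ensemble at 127.0.0.1:61360 and setting watches on znodes such as /hbase/running, /hbase/master and /hbase/acl under baseZNode=/hbase. For illustration only — this uses the stock ZooKeeper client, not HBase's own ZK wrappers — a minimal sketch of inspecting those znodes on that ensemble:

// Sketch: list the /hbase znodes on the mini ZooKeeper quorum named in the log (127.0.0.1:61360).
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkInspectSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61360", 30000, event -> {
      // SyncConnected corresponds to the "state=SyncConnected" watcher events in the log.
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // baseZNode=/hbase in the log; children include master, backup-masters, acl, running, ...
    List<String> children = zk.getChildren("/hbase", false);
    children.forEach(System.out::println);
    zk.close();
  }
}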
2024-11-19T08:48:44,499 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3ab37fa97a98:38369 2024-11-19T08:48:44,500 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:44,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:48:44,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:48:44,505 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:44,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T08:48:44,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,514 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:48:44,514 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3ab37fa97a98,38369,1732006124300 from backup master directory 2024-11-19T08:48:44,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:44,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:48:44,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:48:44,522 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
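The entries above show the master creating and deleting znodes under /hbase/backup-masters while both processes receive NodeCreated/NodeDeleted/NodeChildrenChanged notifications. A minimal sketch of that watch-and-notify pattern with the plain Apache ZooKeeper client follows; the ensemble address and znode paths are copied from the log, everything else (class name, ephemeral node data, error handling) is illustrative and assumes the test ensemble is still reachable.

```java
// Minimal sketch of the ZooKeeper watch/notify pattern visible in the log above.
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class BackupMasterWatchSketch {
    public static void main(String[] args) throws Exception {
        // Watches are one-shot in ZooKeeper, so real code re-registers them on every callback.
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                System.out.println("Received ZooKeeper Event, type=" + event.getType()
                    + ", state=" + event.getState() + ", path=" + event.getPath());
            }
        };

        ZooKeeper zk = new ZooKeeper("127.0.0.1:61360", 30_000, watcher);

        // Watch the children of /hbase/backup-masters; adding or removing a backup-master
        // znode fires a NodeChildrenChanged event like the ones logged above.
        List<String> backups = zk.getChildren("/hbase/backup-masters", watcher);
        System.out.println("current backup masters: " + backups);

        // An ephemeral znode vanishes when this session dies, so a crashed process's
        // registration does not linger.
        zk.create("/hbase/backup-masters/example-host,38369," + System.currentTimeMillis(),
            new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        Thread.sleep(1_000); // give the event thread a moment to print the notification
        zk.close();
    }
}
```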
2024-11-19T08:48:44,522 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:44,526 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/hbase.id] with ID: b4a288e0-f16d-4046-a33b-819775fe5af3 2024-11-19T08:48:44,526 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/.tmp/hbase.id 2024-11-19T08:48:44,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:48:44,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:48:44,532 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/.tmp/hbase.id]:[hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/hbase.id] 2024-11-19T08:48:44,543 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:44,544 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T08:48:44,545 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
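The FSUtils lines above record the cluster ID being written to a temporary file and then moved into place as hbase.id. Below is a sketch of that same write-to-tmp-then-rename pattern using the standard Hadoop FileSystem API; the namenode address, directory, and ID value are taken from the log, but the class is illustrative and is not HBase's FSUtils implementation.

```java
// Sketch of the "write to .tmp, then rename into place" pattern the log describes for hbase.id.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:33937");
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc");
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1. Write the ID to a temporary location first, so readers never see a half-written file.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("b4a288e0-f16d-4046-a33b-819775fe5af3".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Rename within one HDFS namespace is atomic, which makes the publish step safe.
        if (!fs.rename(tmp, target)) {
            throw new java.io.IOException("failed to move " + tmp + " to " + target);
        }
        System.out.println("cluster ID published at " + target);
    }
}
```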
2024-11-19T08:48:44,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:48:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:48:44,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:48:44,565 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T08:48:44,566 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:48:44,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:48:44,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:48:44,574 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store 2024-11-19T08:48:44,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:48:44,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:48:44,580 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:48:44,580 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:48:44,581 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:48:44,581 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:48:44,581 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:48:44,581 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:48:44,581 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
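The MasterRegion lines above print the full descriptor of the local master:store table, with an in-memory, 3-version 'info' family on 8 KB blocks and single-version 'proc', 'rs', and 'state' families on 64 KB blocks. As a reference point only, the sketch below assembles an equivalent descriptor with the public HBase 2.x client builders; it is not the internal MasterRegion code, and the attribute values are simply those printed in the log.

```java
// Sketch: a table descriptor with the column-family settings printed in the log above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8 KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            // 'proc', 'rs', and 'state' use the defaults shown in the log: 1 version, 64 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(desc);
    }
}
```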
2024-11-19T08:48:44,581 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006124580Disabling compacts and flushes for region at 1732006124580Disabling writes for close at 1732006124581 (+1 ms)Writing region close event to WAL at 1732006124581Closed at 1732006124581 2024-11-19T08:48:44,581 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/.initializing 2024-11-19T08:48:44,582 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/WALs/3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:44,584 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C38369%2C1732006124300, suffix=, logDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/WALs/3ab37fa97a98,38369,1732006124300, archiveDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/oldWALs, maxLogs=10 2024-11-19T08:48:44,585 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C38369%2C1732006124300.1732006124584 2024-11-19T08:48:44,590 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/WALs/3ab37fa97a98,38369,1732006124300/3ab37fa97a98%2C38369%2C1732006124300.1732006124584 2024-11-19T08:48:44,601 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37747:37747),(127.0.0.1/127.0.0.1:33099:33099)] 2024-11-19T08:48:44,602 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:48:44,602 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:48:44,602 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,602 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T08:48:44,608 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:44,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,609 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T08:48:44,610 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:48:44,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T08:48:44,611 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:48:44,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T08:48:44,613 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:48:44,613 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,614 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,614 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,615 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,615 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,616 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T08:48:44,617 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:48:44,619 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:48:44,619 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806287, jitterRate=0.025247350335121155}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T08:48:44,620 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732006124602Initializing all the Stores at 1732006124603 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006124603Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006124606 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006124606Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006124606Cleaning up temporary data from old regions at 1732006124615 (+9 ms)Region opened successfully at 1732006124620 (+5 ms) 2024-11-19T08:48:44,620 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T08:48:44,624 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fd63e39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:48:44,625 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T08:48:44,625 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T08:48:44,625 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T08:48:44,625 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T08:48:44,626 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T08:48:44,626 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T08:48:44,626 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T08:48:44,628 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T08:48:44,629 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T08:48:44,644 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T08:48:44,645 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T08:48:44,645 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T08:48:44,655 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T08:48:44,655 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T08:48:44,656 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T08:48:44,663 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T08:48:44,664 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T08:48:44,671 DEBUG 
[master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T08:48:44,674 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T08:48:44,680 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T08:48:44,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:48:44,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:48:44,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,689 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3ab37fa97a98,38369,1732006124300, sessionid=0x1015391adc30000, setting cluster-up flag (Was=false) 2024-11-19T08:48:44,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,730 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T08:48:44,732 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:44,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:44,771 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T08:48:44,772 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:44,774 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T08:48:44,775 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T08:48:44,776 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T08:48:44,776 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T08:48:44,776 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3ab37fa97a98,38369,1732006124300 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3ab37fa97a98:0, corePoolSize=10, maxPoolSize=10 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:48:44,777 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T08:48:44,779 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:48:44,779 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T08:48:44,780 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732006154780 2024-11-19T08:48:44,780 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T08:48:44,780 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T08:48:44,780 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T08:48:44,780 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T08:48:44,781 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
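The ChoreService entries above enable fixed-period background chores such as LogsCleaner (period=600000 ms). The sketch below mimics that fixed-period scheduling idea with plain java.util.concurrent only; it does not use HBase's ChoreService or ScheduledChore classes, and the cleanup body is a placeholder.

```java
// Plain-Java sketch of a fixed-period "chore" like the logged LogsCleaner (period=600000 ms).
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogsCleanerChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService choreService = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "LogsCleaner");
            t.setDaemon(true);
            return t;
        });

        // Run immediately and then every 600 000 ms, matching the logged period.
        choreService.scheduleAtFixedRate(
            () -> System.out.println("scanning oldWALs and deleting expired files ..."),
            0, 600_000, TimeUnit.MILLISECONDS);

        Thread.sleep(1_000); // let the first run happen before the demo exits
        choreService.shutdownNow();
    }
}
```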
2024-11-19T08:48:44,781 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T08:48:44,781 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T08:48:44,782 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006124782,5,FailOnTimeoutGroup] 2024-11-19T08:48:44,782 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006124782,5,FailOnTimeoutGroup] 2024-11-19T08:48:44,782 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,782 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
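The HMaster line above notes that reopening regions with a very high storeFileRefCount is disabled unless hbase.regions.recovery.store.file.ref.count is set to a value greater than 0. The property name comes straight from that log message; the snippet below only illustrates setting it programmatically, whereas a real deployment would normally put it in hbase-site.xml, and the threshold value 3 is an arbitrary example.

```java
// Sketch: enabling the store-file-ref-count recovery check the log reports as disabled.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the "reopen regions with very high storeFileRefCount" check.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println("threshold = "
            + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}
```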
2024-11-19T08:48:44,782 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,782 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,792 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(746): ClusterId : b4a288e0-f16d-4046-a33b-819775fe5af3 2024-11-19T08:48:44,792 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:48:44,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:48:44,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:48:44,793 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T08:48:44,793 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc 2024-11-19T08:48:44,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:48:44,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:48:44,801 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:48:44,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:48:44,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:48:44,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,803 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:48:44,803 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:48:44,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:44,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:48:44,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:48:44,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:44,805 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:48:44,807 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:48:44,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:44,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:48:44,809 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:48:44,809 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:44,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:44,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:48:44,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740 2024-11-19T08:48:44,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740 2024-11-19T08:48:44,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:48:44,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:48:44,812 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:48:44,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:48:44,814 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:48:44,814 DEBUG [RS:0;3ab37fa97a98:43395 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74586adc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:48:44,816 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:48:44,816 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695732, jitterRate=-0.11533191800117493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:48:44,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732006124800Initializing all the Stores at 1732006124801 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006124801Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006124801Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006124801Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006124801Cleaning up temporary data from old regions at 
1732006124812 (+11 ms)Region opened successfully at 1732006124817 (+5 ms) 2024-11-19T08:48:44,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:48:44,817 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:48:44,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:48:44,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:48:44,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:48:44,821 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:48:44,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006124817Disabling compacts and flushes for region at 1732006124817Disabling writes for close at 1732006124817Writing region close event to WAL at 1732006124821 (+4 ms)Closed at 1732006124821 2024-11-19T08:48:44,823 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:48:44,823 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T08:48:44,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T08:48:44,824 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:48:44,825 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T08:48:44,828 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3ab37fa97a98:43395 2024-11-19T08:48:44,828 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:48:44,828 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:48:44,828 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(832): About to register with Master. 
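The lines above end with the region server about to report for duty to the master. From outside the cluster, the result of that registration is visible through the public Admin API. The sketch below assumes the test cluster in this log is still running and reachable at the logged ZooKeeper address; it simply lists the active master and live region servers, which should include 3ab37fa97a98,43395,1732006124464 once registration completes.

```java
// Sketch: inspecting master/region-server membership from a client, via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterMembershipSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "61360"); // ensemble port from the log

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            ClusterMetrics metrics = admin.getClusterMetrics();
            System.out.println("active master: " + metrics.getMasterName());
            for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
                System.out.println("live region server: " + rs);
            }
        }
    }
}
```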
2024-11-19T08:48:44,829 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,38369,1732006124300 with port=43395, startcode=1732006124464 2024-11-19T08:48:44,829 DEBUG [RS:0;3ab37fa97a98:43395 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:48:44,837 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57303, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:48:44,838 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38369 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:44,838 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38369 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:44,840 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc 2024-11-19T08:48:44,840 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33937 2024-11-19T08:48:44,840 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:48:44,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:48:44,847 DEBUG [RS:0;3ab37fa97a98:43395 {}] zookeeper.ZKUtil(111): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:44,847 WARN [RS:0;3ab37fa97a98:43395 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T08:48:44,847 INFO [RS:0;3ab37fa97a98:43395 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:48:44,847 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:44,847 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,43395,1732006124464] 2024-11-19T08:48:44,850 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:48:44,853 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:48:44,853 INFO [RS:0;3ab37fa97a98:43395 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:48:44,853 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T08:48:44,853 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:48:44,854 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:48:44,854 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,854 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:48:44,855 DEBUG [RS:0;3ab37fa97a98:43395 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:48:44,856 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T08:48:44,856 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,856 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,856 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,856 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,856 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43395,1732006124464-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:48:44,873 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:48:44,873 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,43395,1732006124464-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,873 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,873 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.Replication(171): 3ab37fa97a98,43395,1732006124464 started 2024-11-19T08:48:44,887 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:44,887 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,43395,1732006124464, RpcServer on 3ab37fa97a98/172.17.0.2:43395, sessionid=0x1015391adc30001 2024-11-19T08:48:44,888 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:48:44,888 DEBUG [RS:0;3ab37fa97a98:43395 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:44,888 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,43395,1732006124464' 2024-11-19T08:48:44,888 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:48:44,888 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:48:44,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:48:44,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:48:44,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:44,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,43395,1732006124464' 2024-11-19T08:48:44,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:48:44,889 DEBUG 
[RS:0;3ab37fa97a98:43395 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:48:44,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:48:44,889 INFO [RS:0;3ab37fa97a98:43395 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:48:44,889 INFO [RS:0;3ab37fa97a98:43395 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T08:48:44,976 WARN [3ab37fa97a98:38369 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T08:48:44,992 INFO [RS:0;3ab37fa97a98:43395 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C43395%2C1732006124464, suffix=, logDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464, archiveDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/oldWALs, maxLogs=32 2024-11-19T08:48:44,993 INFO [RS:0;3ab37fa97a98:43395 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C43395%2C1732006124464.1732006124993 2024-11-19T08:48:45,001 INFO [RS:0;3ab37fa97a98:43395 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006124993 2024-11-19T08:48:45,002 DEBUG [RS:0;3ab37fa97a98:43395 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33099:33099),(127.0.0.1/127.0.0.1:37747:37747)] 2024-11-19T08:48:45,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:45,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:45,226 DEBUG [3ab37fa97a98:38369 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T08:48:45,226 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:45,228 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,43395,1732006124464, state=OPENING 2024-11-19T08:48:45,261 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T08:48:45,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:45,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:48:45,272 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:48:45,272 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:48:45,272 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:48:45,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,43395,1732006124464}] 2024-11-19T08:48:45,425 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T08:48:45,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57995, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T08:48:45,433 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T08:48:45,433 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:48:45,435 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C43395%2C1732006124464.meta, suffix=.meta, logDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464, archiveDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/oldWALs, maxLogs=32 2024-11-19T08:48:45,436 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C43395%2C1732006124464.meta.1732006125436.meta 2024-11-19T08:48:45,444 INFO 
[RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.meta.1732006125436.meta 2024-11-19T08:48:45,445 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37747:37747),(127.0.0.1/127.0.0.1:33099:33099)] 2024-11-19T08:48:45,446 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:48:45,447 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T08:48:45,447 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T08:48:45,447 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T08:48:45,447 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T08:48:45,447 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:48:45,447 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T08:48:45,447 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T08:48:45,449 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:48:45,449 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:48:45,449 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:45,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:45,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:48:45,451 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:48:45,451 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:45,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:45,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:48:45,452 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:48:45,452 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:45,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:45,453 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:48:45,453 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:48:45,454 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:45,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:48:45,454 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:48:45,455 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740 2024-11-19T08:48:45,456 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740 2024-11-19T08:48:45,458 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:48:45,458 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:48:45,458 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-19T08:48:45,460 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:48:45,460 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735431, jitterRate=-0.06485183537006378}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:48:45,461 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T08:48:45,461 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732006125447Writing region info on filesystem at 1732006125447Initializing all the Stores at 1732006125448 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006125448Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006125448Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006125448Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006125448Cleaning up temporary data from old regions at 1732006125458 (+10 ms)Running coprocessor post-open hooks at 1732006125461 (+3 ms)Region opened successfully at 1732006125461 2024-11-19T08:48:45,462 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732006125425 2024-11-19T08:48:45,465 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T08:48:45,465 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T08:48:45,466 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:45,467 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,43395,1732006124464, state=OPEN 2024-11-19T08:48:45,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:48:45,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:48:45,498 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:45,498 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:48:45,498 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:48:45,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T08:48:45,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,43395,1732006124464 in 226 msec 2024-11-19T08:48:45,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T08:48:45,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 679 msec 2024-11-19T08:48:45,506 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:48:45,506 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T08:48:45,507 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:48:45,507 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,43395,1732006124464, seqNum=-1] 2024-11-19T08:48:45,508 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:48:45,509 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55517, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:48:45,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 739 msec 2024-11-19T08:48:45,515 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732006125515, completionTime=-1 2024-11-19T08:48:45,516 INFO 
[master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T08:48:45,516 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T08:48:45,518 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T08:48:45,518 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732006185518 2024-11-19T08:48:45,518 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732006245518 2024-11-19T08:48:45,518 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T08:48:45,518 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38369,1732006124300-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:45,518 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38369,1732006124300-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:45,518 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38369,1732006124300-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:45,519 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3ab37fa97a98:38369, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:45,519 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:45,519 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:45,521 DEBUG [master/3ab37fa97a98:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.001sec 2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38369,1732006124300-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:48:45,523 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38369,1732006124300-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T08:48:45,526 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T08:48:45,526 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T08:48:45,526 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,38369,1732006124300-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:48:45,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@550aa84c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:48:45,591 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3ab37fa97a98,38369,-1 for getting cluster id 2024-11-19T08:48:45,592 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T08:48:45,593 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b4a288e0-f16d-4046-a33b-819775fe5af3' 2024-11-19T08:48:45,594 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T08:48:45,594 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b4a288e0-f16d-4046-a33b-819775fe5af3" 2024-11-19T08:48:45,594 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@754209db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:48:45,594 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3ab37fa97a98,38369,-1] 2024-11-19T08:48:45,594 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T08:48:45,595 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:48:45,596 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44816, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T08:48:45,597 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f9a27d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:48:45,597 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:48:45,599 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,43395,1732006124464, seqNum=-1] 2024-11-19T08:48:45,599 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:48:45,600 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34592, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:48:45,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:45,603 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:48:45,606 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T08:48:45,606 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T08:48:45,607 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 3ab37fa97a98,38369,1732006124300 2024-11-19T08:48:45,607 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@14bddac3 2024-11-19T08:48:45,608 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T08:48:45,609 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T08:48:45,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38369 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T08:48:45,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38369 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T08:48:45,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:48:45,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38369 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-19T08:48:45,613 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T08:48:45,613 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:45,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-19T08:48:45,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:48:45,614 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T08:48:45,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741835_1011 (size=381) 2024-11-19T08:48:45,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741835_1011 (size=381) 2024-11-19T08:48:45,623 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bc4de71ea654c058906436fa4960c66c, NAME => 'TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc 2024-11-19T08:48:45,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741836_1012 (size=64) 2024-11-19T08:48:45,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741836_1012 (size=64) 2024-11-19T08:48:45,630 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:48:45,630 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bc4de71ea654c058906436fa4960c66c, disabling compactions & flushes 2024-11-19T08:48:45,630 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:48:45,630 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:48:45,630 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. after waiting 0 ms 2024-11-19T08:48:45,630 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:48:45,630 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:48:45,630 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bc4de71ea654c058906436fa4960c66c: Waiting for close lock at 1732006125630Disabling compacts and flushes for region at 1732006125630Disabling writes for close at 1732006125630Writing region close event to WAL at 1732006125630Closed at 1732006125630 2024-11-19T08:48:45,631 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T08:48:45,632 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732006125631"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732006125631"}]},"ts":"1732006125631"} 2024-11-19T08:48:45,634 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T08:48:45,635 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T08:48:45,635 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732006125635"}]},"ts":"1732006125635"} 2024-11-19T08:48:45,637 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-19T08:48:45,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, ASSIGN}] 2024-11-19T08:48:45,639 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, ASSIGN 2024-11-19T08:48:45,640 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, ASSIGN; state=OFFLINE, location=3ab37fa97a98,43395,1732006124464; forceNewPlan=false, retain=false 2024-11-19T08:48:45,791 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bc4de71ea654c058906436fa4960c66c, regionState=OPENING, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:45,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, ASSIGN because future has completed 2024-11-19T08:48:45,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464}] 2024-11-19T08:48:45,951 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 
2024-11-19T08:48:45,952 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bc4de71ea654c058906436fa4960c66c, NAME => 'TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:48:45,952 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,952 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:48:45,952 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,952 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,954 INFO [StoreOpener-bc4de71ea654c058906436fa4960c66c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,956 INFO [StoreOpener-bc4de71ea654c058906436fa4960c66c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bc4de71ea654c058906436fa4960c66c columnFamilyName info 2024-11-19T08:48:45,956 DEBUG [StoreOpener-bc4de71ea654c058906436fa4960c66c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:48:45,957 INFO [StoreOpener-bc4de71ea654c058906436fa4960c66c-1 {}] regionserver.HStore(327): Store=bc4de71ea654c058906436fa4960c66c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:48:45,957 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,958 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,958 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,959 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,959 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,961 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,963 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:48:45,964 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bc4de71ea654c058906436fa4960c66c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873899, jitterRate=0.11122065782546997}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T08:48:45,964 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:45,964 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bc4de71ea654c058906436fa4960c66c: Running coprocessor pre-open hook at 1732006125953Writing region info on filesystem at 1732006125953Initializing all the Stores at 1732006125954 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006125954Cleaning up temporary data from old regions at 1732006125959 (+5 ms)Running coprocessor post-open hooks at 1732006125964 (+5 ms)Region opened successfully at 1732006125964 2024-11-19T08:48:45,965 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., pid=6, masterSystemTime=1732006125946 2024-11-19T08:48:45,967 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 
2024-11-19T08:48:45,967 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:48:45,968 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bc4de71ea654c058906436fa4960c66c, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:45,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 because future has completed 2024-11-19T08:48:45,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T08:48:45,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 in 178 msec 2024-11-19T08:48:45,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T08:48:45,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, ASSIGN in 336 msec 2024-11-19T08:48:45,977 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T08:48:45,977 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732006125977"}]},"ts":"1732006125977"} 2024-11-19T08:48:45,980 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-19T08:48:45,981 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T08:48:45,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 371 msec 2024-11-19T08:48:46,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:46,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:47,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,105 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,105 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:47,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:47,613 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:48:47,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,654 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,654 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,654 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:47,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:48:48,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:48,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:49,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:49,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:50,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:50,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:50,851 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T08:48:50,851 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-19T08:48:51,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:51,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:52,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:52,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:52,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T08:48:52,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T08:48:52,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:48:52,493 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T08:48:52,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T08:48:52,493 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T08:48:52,494 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-19T08:48:52,494 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T08:48:53,228 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:53,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:54,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:54,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:55,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:55,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:55,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T08:48:55,642 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-19T08:48:55,642 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-19T08:48:55,645 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-19T08:48:55,645 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:48:55,648 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2] 2024-11-19T08:48:55,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:55,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc4de71ea654c058906436fa4960c66c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:48:55,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4472078a4cbf4120a4ca8bd8732aa98e is 1080, key is row0001/info:/1732006135649/Put/seqid=0 2024-11-19T08:48:55,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741837_1013 (size=12509) 2024-11-19T08:48:55,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741837_1013 (size=12509) 2024-11-19T08:48:55,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4472078a4cbf4120a4ca8bd8732aa98e 2024-11-19T08:48:55,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4472078a4cbf4120a4ca8bd8732aa98e as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4472078a4cbf4120a4ca8bd8732aa98e 2024-11-19T08:48:55,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4472078a4cbf4120a4ca8bd8732aa98e, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-19T08:48:55,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for bc4de71ea654c058906436fa4960c66c in 42ms, sequenceid=11, compaction requested=false 2024-11-19T08:48:55,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc4de71ea654c058906436fa4960c66c: 2024-11-19T08:48:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:55,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc4de71ea654c058906436fa4960c66c 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-19T08:48:55,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/fa37aede3db84ff5a317cb65a3901c8b is 1080, key is row0008/info:/1732006135699/Put/seqid=0 2024-11-19T08:48:55,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741838_1014 (size=26530) 2024-11-19T08:48:55,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741838_1014 (size=26530) 2024-11-19T08:48:55,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/fa37aede3db84ff5a317cb65a3901c8b 2024-11-19T08:48:55,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/fa37aede3db84ff5a317cb65a3901c8b as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b 2024-11-19T08:48:55,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b, entries=20, sequenceid=34, filesize=25.9 K 2024-11-19T08:48:55,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for bc4de71ea654c058906436fa4960c66c in 26ms, sequenceid=34, compaction requested=false 2024-11-19T08:48:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc4de71ea654c058906436fa4960c66c: 2024-11-19T08:48:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-11-19T08:48:55,767 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:48:55,768 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b because midkey is the same as first or last row 2024-11-19T08:48:56,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:56,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:57,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:57,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:48:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on bc4de71ea654c058906436fa4960c66c 2024-11-19T08:48:57,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc4de71ea654c058906436fa4960c66c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:48:57,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/478ea90fa36d46d387ce9e9c9efcb9cc is 1080, key is row0028/info:/1732006135742/Put/seqid=0 2024-11-19T08:48:57,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741839_1015 (size=12509) 2024-11-19T08:48:57,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741839_1015 (size=12509) 2024-11-19T08:48:57,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-19T08:48:57,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34592 deadline: 1732006147806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 2024-11-19T08:48:57,832 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:48:57,833 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:48:57,833 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 because the exception is null or not the one we care about 2024-11-19T08:48:58,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/478ea90fa36d46d387ce9e9c9efcb9cc 2024-11-19T08:48:58,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/478ea90fa36d46d387ce9e9c9efcb9cc as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/478ea90fa36d46d387ce9e9c9efcb9cc 2024-11-19T08:48:58,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/478ea90fa36d46d387ce9e9c9efcb9cc, entries=7, sequenceid=44, filesize=12.2 K 2024-11-19T08:48:58,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for bc4de71ea654c058906436fa4960c66c in 427ms, sequenceid=44, compaction requested=true 2024-11-19T08:48:58,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc4de71ea654c058906436fa4960c66c: 2024-11-19T08:48:58,188 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-11-19T08:48:58,188 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:48:58,189 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b because midkey is the same as first or last row 2024-11-19T08:48:58,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bc4de71ea654c058906436fa4960c66c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:48:58,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:48:58,189 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:48:58,190 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:48:58,190 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): bc4de71ea654c058906436fa4960c66c/info is initiating minor compaction (all files) 2024-11-19T08:48:58,190 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bc4de71ea654c058906436fa4960c66c/info in TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:48:58,190 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4472078a4cbf4120a4ca8bd8732aa98e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/478ea90fa36d46d387ce9e9c9efcb9cc] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp, totalSize=50.3 K 2024-11-19T08:48:58,191 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4472078a4cbf4120a4ca8bd8732aa98e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732006135649 2024-11-19T08:48:58,191 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting fa37aede3db84ff5a317cb65a3901c8b, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732006135699 2024-11-19T08:48:58,191 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 478ea90fa36d46d387ce9e9c9efcb9cc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732006135742 2024-11-19T08:48:58,204 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bc4de71ea654c058906436fa4960c66c#info#compaction#57 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:48:58,205 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4251f2aa9aab4cf59eb94ebf178cc559 is 1080, key is row0001/info:/1732006135649/Put/seqid=0 2024-11-19T08:48:58,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741840_1016 (size=41747) 2024-11-19T08:48:58,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741840_1016 (size=41747) 2024-11-19T08:48:58,220 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4251f2aa9aab4cf59eb94ebf178cc559 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 2024-11-19T08:48:58,226 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bc4de71ea654c058906436fa4960c66c/info of bc4de71ea654c058906436fa4960c66c into 4251f2aa9aab4cf59eb94ebf178cc559(size=40.8 K), total size for store is 40.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:48:58,226 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bc4de71ea654c058906436fa4960c66c: 2024-11-19T08:48:58,226 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., storeName=bc4de71ea654c058906436fa4960c66c/info, priority=13, startTime=1732006138189; duration=0sec 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.8 K, sizeToCheck=16.0 K 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 because midkey is the same as first or last row 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.8 K, sizeToCheck=16.0 K 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 because midkey is the same as first or last row 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.8 K, sizeToCheck=16.0 K 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 because midkey is the same as first or last row 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:48:58,227 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bc4de71ea654c058906436fa4960c66c:info 2024-11-19T08:48:58,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:58,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:59,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:48:59,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:00,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:00,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:01,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:01,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:02,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:02,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:03,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:03,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:04,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:04,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:05,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:05,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:06,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:06,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:07,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:07,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:07,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:07,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc4de71ea654c058906436fa4960c66c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-19T08:49:07,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4ef26bfc9c3f47b6ba88c5e38d064f86 is 1080, key is row0035/info:/1732006137763/Put/seqid=0 2024-11-19T08:49:07,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741841_1017 (size=29761) 2024-11-19T08:49:07,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741841_1017 (size=29761) 2024-11-19T08:49:07,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=71 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4ef26bfc9c3f47b6ba88c5e38d064f86 2024-11-19T08:49:07,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-19T08:49:07,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34592 deadline: 1732006157958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:07,960 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:49:07,960 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:49:07,960 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 because the exception is null or not the one we care about 2024-11-19T08:49:07,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/4ef26bfc9c3f47b6ba88c5e38d064f86 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4ef26bfc9c3f47b6ba88c5e38d064f86 2024-11-19T08:49:07,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4ef26bfc9c3f47b6ba88c5e38d064f86, entries=23, sequenceid=71, filesize=29.1 K 2024-11-19T08:49:07,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=6.30 KB/6456 for bc4de71ea654c058906436fa4960c66c in 28ms, sequenceid=71, compaction requested=false 2024-11-19T08:49:07,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc4de71ea654c058906436fa4960c66c: 2024-11-19T08:49:07,970 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.8 K, sizeToCheck=16.0 K 2024-11-19T08:49:07,970 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:49:07,971 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 because midkey is the same as first or last row 2024-11-19T08:49:08,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:08,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:09,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:09,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:10,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:10,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:11,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:11,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:12,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:12,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:13,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:13,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:14,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:14,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:14,284 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T08:49:15,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:15,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:16,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:16,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:17,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:17,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:18,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:18,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bc4de71ea654c058906436fa4960c66c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:49:18,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/d982c92a25b4484187c504717ff501dd is 1080, key is row0058/info:/1732006147944/Put/seqid=0 2024-11-19T08:49:18,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741842_1018 (size=12509) 2024-11-19T08:49:18,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741842_1018 (size=12509) 2024-11-19T08:49:18,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/d982c92a25b4484187c504717ff501dd 2024-11-19T08:49:18,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/d982c92a25b4484187c504717ff501dd as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/d982c92a25b4484187c504717ff501dd 2024-11-19T08:49:18,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/d982c92a25b4484187c504717ff501dd, entries=7, sequenceid=81, filesize=12.2 K 2024-11-19T08:49:18,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bc4de71ea654c058906436fa4960c66c in 23ms, sequenceid=81, compaction requested=true 2024-11-19T08:49:18,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bc4de71ea654c058906436fa4960c66c: 2024-11-19T08:49:18,027 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.0 K, sizeToCheck=16.0 K 2024-11-19T08:49:18,027 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:49:18,027 DEBUG [MemStoreFlusher.0 {}] 
regionserver.StoreUtils(137): cannot split hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 because midkey is the same as first or last row 2024-11-19T08:49:18,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bc4de71ea654c058906436fa4960c66c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:18,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:18,027 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:49:18,028 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84017 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:49:18,028 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): bc4de71ea654c058906436fa4960c66c/info is initiating minor compaction (all files) 2024-11-19T08:49:18,028 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bc4de71ea654c058906436fa4960c66c/info in TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:49:18,028 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4ef26bfc9c3f47b6ba88c5e38d064f86, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/d982c92a25b4484187c504717ff501dd] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp, totalSize=82.0 K 2024-11-19T08:49:18,029 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4251f2aa9aab4cf59eb94ebf178cc559, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732006135649 2024-11-19T08:49:18,029 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ef26bfc9c3f47b6ba88c5e38d064f86, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1732006137763 2024-11-19T08:49:18,029 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting d982c92a25b4484187c504717ff501dd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732006147944 2024-11-19T08:49:18,043 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bc4de71ea654c058906436fa4960c66c#info#compaction#60 average throughput is 21.89 MB/second, slept 0 time(s) 
and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:18,044 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/ceb166b1116b4055acfd88a50aaf1de2 is 1080, key is row0001/info:/1732006135649/Put/seqid=0 2024-11-19T08:49:18,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741843_1019 (size=74301) 2024-11-19T08:49:18,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741843_1019 (size=74301) 2024-11-19T08:49:18,056 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/.tmp/info/ceb166b1116b4055acfd88a50aaf1de2 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2 2024-11-19T08:49:18,063 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bc4de71ea654c058906436fa4960c66c/info of bc4de71ea654c058906436fa4960c66c into ceb166b1116b4055acfd88a50aaf1de2(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:18,063 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bc4de71ea654c058906436fa4960c66c: 2024-11-19T08:49:18,063 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., storeName=bc4de71ea654c058906436fa4960c66c/info, priority=13, startTime=1732006158027; duration=0sec 2024-11-19T08:49:18,063 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-19T08:49:18,063 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:49:18,063 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-19T08:49:18,063 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:49:18,063 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-19T08:49:18,063 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T08:49:18,064 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:18,064 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:18,064 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bc4de71ea654c058906436fa4960c66c:info 2024-11-19T08:49:18,066 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38369 {}] assignment.AssignmentManager(1363): Split request from 3ab37fa97a98,43395,1732006124464, parent={ENCODED => bc4de71ea654c058906436fa4960c66c, NAME => 'TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-19T08:49:18,071 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38369 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:18,074 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38369 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bc4de71ea654c058906436fa4960c66c, daughterA=e9a2694a1253a4604bd998ea0bb567ca, daughterB=6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,075 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bc4de71ea654c058906436fa4960c66c, daughterA=e9a2694a1253a4604bd998ea0bb567ca, daughterB=6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,075 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bc4de71ea654c058906436fa4960c66c, daughterA=e9a2694a1253a4604bd998ea0bb567ca, daughterB=6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,075 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bc4de71ea654c058906436fa4960c66c, daughterA=e9a2694a1253a4604bd998ea0bb567ca, daughterB=6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, UNASSIGN}] 2024-11-19T08:49:18,085 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, UNASSIGN 2024-11-19T08:49:18,087 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bc4de71ea654c058906436fa4960c66c, regionState=CLOSING, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:18,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, UNASSIGN because future has completed 2024-11-19T08:49:18,090 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-19T08:49:18,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464}] 2024-11-19T08:49:18,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:18,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:18,247 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:18,247 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-19T08:49:18,248 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing bc4de71ea654c058906436fa4960c66c, disabling compactions & flushes 2024-11-19T08:49:18,248 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:49:18,248 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 2024-11-19T08:49:18,248 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. after waiting 0 ms 2024-11-19T08:49:18,248 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 
2024-11-19T08:49:18,249 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4472078a4cbf4120a4ca8bd8732aa98e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/478ea90fa36d46d387ce9e9c9efcb9cc, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4ef26bfc9c3f47b6ba88c5e38d064f86, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/d982c92a25b4484187c504717ff501dd] to archive 2024-11-19T08:49:18,250 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T08:49:18,252 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4472078a4cbf4120a4ca8bd8732aa98e to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4472078a4cbf4120a4ca8bd8732aa98e 2024-11-19T08:49:18,253 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/fa37aede3db84ff5a317cb65a3901c8b 2024-11-19T08:49:18,255 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4251f2aa9aab4cf59eb94ebf178cc559 2024-11-19T08:49:18,256 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/478ea90fa36d46d387ce9e9c9efcb9cc to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/478ea90fa36d46d387ce9e9c9efcb9cc 2024-11-19T08:49:18,257 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4ef26bfc9c3f47b6ba88c5e38d064f86 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/4ef26bfc9c3f47b6ba88c5e38d064f86 2024-11-19T08:49:18,258 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/d982c92a25b4484187c504717ff501dd to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/d982c92a25b4484187c504717ff501dd 2024-11-19T08:49:18,265 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-19T08:49:18,265 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 
2024-11-19T08:49:18,265 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for bc4de71ea654c058906436fa4960c66c: Waiting for close lock at 1732006158248Running coprocessor pre-close hooks at 1732006158248Disabling compacts and flushes for region at 1732006158248Disabling writes for close at 1732006158248Writing region close event to WAL at 1732006158261 (+13 ms)Running coprocessor post-close hooks at 1732006158265 (+4 ms)Closed at 1732006158265 2024-11-19T08:49:18,268 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:18,269 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bc4de71ea654c058906436fa4960c66c, regionState=CLOSED 2024-11-19T08:49:18,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 because future has completed 2024-11-19T08:49:18,274 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-19T08:49:18,274 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure bc4de71ea654c058906436fa4960c66c, server=3ab37fa97a98,43395,1732006124464 in 182 msec 2024-11-19T08:49:18,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T08:49:18,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bc4de71ea654c058906436fa4960c66c, UNASSIGN in 191 msec 2024-11-19T08:49:18,283 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:18,286 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=bc4de71ea654c058906436fa4960c66c, threads=1 2024-11-19T08:49:18,288 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2 for region: bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:18,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741844_1020 (size=27) 2024-11-19T08:49:18,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741844_1020 (size=27) 2024-11-19T08:49:18,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741845_1021 (size=27) 2024-11-19T08:49:18,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741845_1021 (size=27) 2024-11-19T08:49:18,313 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2 for region: bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:18,316 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region bc4de71ea654c058906436fa4960c66c Daughter A: [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c] storefiles, Daughter B: [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c] storefiles. 2024-11-19T08:49:18,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741846_1022 (size=71) 2024-11-19T08:49:18,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741846_1022 (size=71) 2024-11-19T08:49:18,328 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:18,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741847_1023 (size=71) 2024-11-19T08:49:18,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741847_1023 (size=71) 2024-11-19T08:49:18,341 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:18,349 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-19T08:49:18,351 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-19T08:49:18,354 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732006158354"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732006158354"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732006158354"}]},"ts":"1732006158354"} 2024-11-19T08:49:18,355 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732006158354"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732006158354"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732006158354"}]},"ts":"1732006158354"} 
2024-11-19T08:49:18,355 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732006158354"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732006158354"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732006158354"}]},"ts":"1732006158354"} 2024-11-19T08:49:18,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e9a2694a1253a4604bd998ea0bb567ca, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6d1ec013cc486e1d83824c31ac9dff5c, ASSIGN}] 2024-11-19T08:49:18,376 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6d1ec013cc486e1d83824c31ac9dff5c, ASSIGN 2024-11-19T08:49:18,376 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e9a2694a1253a4604bd998ea0bb567ca, ASSIGN 2024-11-19T08:49:18,377 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e9a2694a1253a4604bd998ea0bb567ca, ASSIGN; state=SPLITTING_NEW, location=3ab37fa97a98,43395,1732006124464; forceNewPlan=false, retain=false 2024-11-19T08:49:18,377 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6d1ec013cc486e1d83824c31ac9dff5c, ASSIGN; state=SPLITTING_NEW, location=3ab37fa97a98,43395,1732006124464; forceNewPlan=false, retain=false 2024-11-19T08:49:18,528 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=6d1ec013cc486e1d83824c31ac9dff5c, regionState=OPENING, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:18,528 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e9a2694a1253a4604bd998ea0bb567ca, regionState=OPENING, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:18,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e9a2694a1253a4604bd998ea0bb567ca, ASSIGN because future has completed 2024-11-19T08:49:18,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9a2694a1253a4604bd998ea0bb567ca, server=3ab37fa97a98,43395,1732006124464}] 2024-11-19T08:49:18,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6d1ec013cc486e1d83824c31ac9dff5c, ASSIGN because future has completed 2024-11-19T08:49:18,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d1ec013cc486e1d83824c31ac9dff5c, server=3ab37fa97a98,43395,1732006124464}] 2024-11-19T08:49:18,687 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:18,687 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 6d1ec013cc486e1d83824c31ac9dff5c, NAME => 'TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-19T08:49:18,688 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,688 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:49:18,688 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,688 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,691 INFO [StoreOpener-6d1ec013cc486e1d83824c31ac9dff5c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,692 INFO [StoreOpener-6d1ec013cc486e1d83824c31ac9dff5c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6d1ec013cc486e1d83824c31ac9dff5c columnFamilyName info 2024-11-19T08:49:18,692 DEBUG [StoreOpener-6d1ec013cc486e1d83824c31ac9dff5c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-19T08:49:18,706 DEBUG [StoreOpener-6d1ec013cc486e1d83824c31ac9dff5c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c->hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2-top 2024-11-19T08:49:18,707 INFO [StoreOpener-6d1ec013cc486e1d83824c31ac9dff5c-1 {}] regionserver.HStore(327): Store=6d1ec013cc486e1d83824c31ac9dff5c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:49:18,707 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,708 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,709 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,710 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,710 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,712 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,713 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 6d1ec013cc486e1d83824c31ac9dff5c; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727110, jitterRate=-0.07543264329433441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T08:49:18,713 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:18,713 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 6d1ec013cc486e1d83824c31ac9dff5c: Running coprocessor pre-open hook at 1732006158688Writing region info on filesystem at 1732006158688Initializing all the Stores at 1732006158690 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006158690Cleaning up temporary data from old regions at 1732006158710 (+20 ms)Running coprocessor post-open hooks at 1732006158713 (+3 ms)Region opened successfully at 1732006158713 2024-11-19T08:49:18,714 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., pid=13, masterSystemTime=1732006158683 2024-11-19T08:49:18,715 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:18,715 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-19T08:49:18,715 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:18,715 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:18,715 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:18,715 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6d1ec013cc486e1d83824c31ac9dff5c/info in TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:18,716 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c->hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2-top] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=72.6 K 2024-11-19T08:49:18,716 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732006135649 2024-11-19T08:49:18,717 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 
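The CompactionConfiguration line in the store-open sequence above prints the effective compaction tuning for the daughter region (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, major period 604800000 ms, jitter 0.5). For reference only, the sketch below restates those printed values against the stock HBase configuration keys that control them; the class name is made up, the values are simply the defaults the log reports, and nothing here is part of the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  // Returns a Configuration carrying the same compaction settings the log reports.
  static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // file selection ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period, 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major compaction jitter
    return conf;
  }
}
```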
2024-11-19T08:49:18,717 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:18,718 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 2024-11-19T08:49:18,718 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => e9a2694a1253a4604bd998ea0bb567ca, NAME => 'TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-19T08:49:18,718 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,718 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:49:18,718 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,718 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,719 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=6d1ec013cc486e1d83824c31ac9dff5c, regionState=OPEN, openSeqNum=86, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:18,719 INFO [StoreOpener-e9a2694a1253a4604bd998ea0bb567ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,720 INFO [StoreOpener-e9a2694a1253a4604bd998ea0bb567ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9a2694a1253a4604bd998ea0bb567ca columnFamilyName info 2024-11-19T08:49:18,720 DEBUG [StoreOpener-e9a2694a1253a4604bd998ea0bb567ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:18,721 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-19T08:49:18,721 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-19T08:49:18,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-19T08:49:18,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d1ec013cc486e1d83824c31ac9dff5c, server=3ab37fa97a98,43395,1732006124464 because future has completed 2024-11-19T08:49:18,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-19T08:49:18,726 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 6d1ec013cc486e1d83824c31ac9dff5c, server=3ab37fa97a98,43395,1732006124464 in 191 msec 2024-11-19T08:49:18,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6d1ec013cc486e1d83824c31ac9dff5c, ASSIGN in 352 msec 2024-11-19T08:49:18,732 DEBUG [StoreOpener-e9a2694a1253a4604bd998ea0bb567ca-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c->hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2-bottom 2024-11-19T08:49:18,732 INFO [StoreOpener-e9a2694a1253a4604bd998ea0bb567ca-1 {}] regionserver.HStore(327): Store=e9a2694a1253a4604bd998ea0bb567ca/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:49:18,732 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,733 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,734 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,735 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,735 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,736 INFO 
[RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#61 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:18,736 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/792d261b68ee4547b50e945de3663630 is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:18,736 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,737 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened e9a2694a1253a4604bd998ea0bb567ca; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710583, jitterRate=-0.09644792973995209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T08:49:18,737 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:18,738 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for e9a2694a1253a4604bd998ea0bb567ca: Running coprocessor pre-open hook at 1732006158719Writing region info on filesystem at 1732006158719Initializing all the Stores at 1732006158719Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006158719Cleaning up temporary data from old regions at 1732006158735 (+16 ms)Running coprocessor post-open hooks at 1732006158737 (+2 ms)Region opened successfully at 1732006158737 2024-11-19T08:49:18,738 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca., pid=12, masterSystemTime=1732006158683 2024-11-19T08:49:18,738 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store e9a2694a1253a4604bd998ea0bb567ca:info, priority=-2147483648, current under compaction store size is 2 2024-11-19T08:49:18,738 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:18,739 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-19T08:49:18,739 INFO [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.HStore(1527): 
Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 2024-11-19T08:49:18,739 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.HStore(1541): e9a2694a1253a4604bd998ea0bb567ca/info is initiating minor compaction (all files) 2024-11-19T08:49:18,739 INFO [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e9a2694a1253a4604bd998ea0bb567ca/info in TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 2024-11-19T08:49:18,740 INFO [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c->hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2-bottom] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/.tmp, totalSize=72.6 K 2024-11-19T08:49:18,740 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] compactions.Compactor(225): Compacting ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732006135649 2024-11-19T08:49:18,741 DEBUG [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 2024-11-19T08:49:18,741 INFO [RS_OPEN_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 
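At this point both daughters of the split are open and compacting their half of the parent's reference file: 6d1ec013... serves row0062 onward (top half) and e9a2694a... serves the empty start key up to row0062 (bottom half). As a point of reference, a split like the SplitTableRegionProcedure driving this sequence can also be requested from a client; the sketch below uses the standard Admin API with the table name and split key taken from the log, and is illustrative rather than part of this test.

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the master to split the table at the key the log shows ("row0062").
      // The split itself runs asynchronously as a procedure on the master.
      admin.split(table, Bytes.toBytes("row0062"));
      // Once the daughters are assigned, they appear with the split key as their boundary.
      List<RegionInfo> regions = admin.getRegions(table);
      for (RegionInfo r : regions) {
        System.out.println(r.getEncodedName()
            + " start=" + Bytes.toStringBinary(r.getStartKey()));
      }
    }
  }
}
```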
2024-11-19T08:49:18,741 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e9a2694a1253a4604bd998ea0bb567ca, regionState=OPEN, openSeqNum=86, regionLocation=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:18,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741848_1024 (size=8260) 2024-11-19T08:49:18,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741848_1024 (size=8260) 2024-11-19T08:49:18,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/info/0817bfa06daf492e96f31943dbd22469 is 193, key is TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c./info:regioninfo/1732006158719/Put/seqid=0 2024-11-19T08:49:18,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9a2694a1253a4604bd998ea0bb567ca, server=3ab37fa97a98,43395,1732006124464 because future has completed 2024-11-19T08:49:18,748 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/792d261b68ee4547b50e945de3663630 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/792d261b68ee4547b50e945de3663630 2024-11-19T08:49:18,755 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into 792d261b68ee4547b50e945de3663630(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
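The MemStoreFlusher entries interleaved here record a flush of the hbase:meta region (1588230740): each touched column family (info, ns, table) is written to a .tmp HFile and then committed into the store. The same kind of flush can be requested explicitly from a client; the sketch below uses the standard Admin.flush call, the class name is made up, and this is an illustration rather than something the test does.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush hbase:meta, the region (1588230740) being flushed in the log above.
      admin.flush(TableName.META_TABLE_NAME);
      // Flushing a user table works the same way.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
    }
  }
}
```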
2024-11-19T08:49:18,755 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:18,756 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=15, startTime=1732006158714; duration=0sec 2024-11-19T08:49:18,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741849_1025 (size=9882) 2024-11-19T08:49:18,756 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:18,756 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:18,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741849_1025 (size=9882) 2024-11-19T08:49:18,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/info/0817bfa06daf492e96f31943dbd22469 2024-11-19T08:49:18,762 INFO [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e9a2694a1253a4604bd998ea0bb567ca#info#compaction#63 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:18,762 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/.tmp/info/236b26db02b3476d875d3eddc795f524 is 1080, key is row0001/info:/1732006135649/Put/seqid=0 2024-11-19T08:49:18,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-19T08:49:18,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure e9a2694a1253a4604bd998ea0bb567ca, server=3ab37fa97a98,43395,1732006124464 in 229 msec 2024-11-19T08:49:18,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-19T08:49:18,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e9a2694a1253a4604bd998ea0bb567ca, ASSIGN in 389 msec 2024-11-19T08:49:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741850_1026 (size=70862) 2024-11-19T08:49:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741850_1026 (size=70862) 2024-11-19T08:49:18,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bc4de71ea654c058906436fa4960c66c, daughterA=e9a2694a1253a4604bd998ea0bb567ca, daughterB=6d1ec013cc486e1d83824c31ac9dff5c in 695 msec 2024-11-19T08:49:18,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/ns/0472981690934509851faaf2bf4c6ffc is 43, key is default/ns:d/1732006125510/Put/seqid=0 2024-11-19T08:49:18,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741851_1027 (size=5153) 2024-11-19T08:49:18,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741851_1027 (size=5153) 2024-11-19T08:49:18,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/ns/0472981690934509851faaf2bf4c6ffc 2024-11-19T08:49:18,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/table/211008e635e547e1b2364db5621903de is 65, key is TestLogRolling-testLogRolling/table:state/1732006125977/Put/seqid=0 2024-11-19T08:49:18,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741852_1028 (size=5340) 2024-11-19T08:49:18,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to 
blk_1073741852_1028 (size=5340) 2024-11-19T08:49:18,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/table/211008e635e547e1b2364db5621903de 2024-11-19T08:49:18,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/info/0817bfa06daf492e96f31943dbd22469 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/info/0817bfa06daf492e96f31943dbd22469 2024-11-19T08:49:18,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/info/0817bfa06daf492e96f31943dbd22469, entries=30, sequenceid=17, filesize=9.7 K 2024-11-19T08:49:18,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/ns/0472981690934509851faaf2bf4c6ffc as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/ns/0472981690934509851faaf2bf4c6ffc 2024-11-19T08:49:18,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/ns/0472981690934509851faaf2bf4c6ffc, entries=2, sequenceid=17, filesize=5.0 K 2024-11-19T08:49:18,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/table/211008e635e547e1b2364db5621903de as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/table/211008e635e547e1b2364db5621903de 2024-11-19T08:49:18,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/table/211008e635e547e1b2364db5621903de, entries=2, sequenceid=17, filesize=5.2 K 2024-11-19T08:49:18,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 114ms, sequenceid=17, compaction requested=false 2024-11-19T08:49:18,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T08:49:19,173 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/.tmp/info/236b26db02b3476d875d3eddc795f524 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/info/236b26db02b3476d875d3eddc795f524 2024-11-19T08:49:19,180 INFO [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in e9a2694a1253a4604bd998ea0bb567ca/info of e9a2694a1253a4604bd998ea0bb567ca into 
236b26db02b3476d875d3eddc795f524(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:19,180 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e9a2694a1253a4604bd998ea0bb567ca: 2024-11-19T08:49:19,180 INFO [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca., storeName=e9a2694a1253a4604bd998ea0bb567ca/info, priority=15, startTime=1732006158738; duration=0sec 2024-11-19T08:49:19,180 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:19,180 DEBUG [RS:0;3ab37fa97a98:43395-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e9a2694a1253a4604bd998ea0bb567ca:info 2024-11-19T08:49:19,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:19,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:20,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34592 deadline: 1732006170004, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. is not online on 3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:20,006 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. 
is not online on 3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:49:20,006 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c. is not online on 3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:49:20,006 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732006125610.bc4de71ea654c058906436fa4960c66c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=2 from cache 2024-11-19T08:49:20,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:20,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:21,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:21,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:22,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:22,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:23,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:23,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:23,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,811 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:49:23,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:23,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:24,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:24,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:25,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:25,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:25,539 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T08:49:25,539 INFO [master/3ab37fa97a98:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T08:49:26,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:26,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:27,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:27,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:28,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:28,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:29,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:29,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:30,038 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=86] 2024-11-19T08:49:30,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:30,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:49:30,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/e481b2e614b2489f93761280dc684a3f is 1080, key is row0065/info:/1732006170039/Put/seqid=0 2024-11-19T08:49:30,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741853_1029 (size=12509) 2024-11-19T08:49:30,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741853_1029 (size=12509) 2024-11-19T08:49:30,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/e481b2e614b2489f93761280dc684a3f 2024-11-19T08:49:30,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/e481b2e614b2489f93761280dc684a3f as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e481b2e614b2489f93761280dc684a3f 2024-11-19T08:49:30,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e481b2e614b2489f93761280dc684a3f, entries=7, sequenceid=96, filesize=12.2 K 2024-11-19T08:49:30,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 6d1ec013cc486e1d83824c31ac9dff5c in 27ms, sequenceid=96, compaction requested=false 2024-11-19T08:49:30,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 
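The flush records above follow a write-to-temporary-then-commit sequence: the flushed memstore data is first written as a new file under the region's ".tmp" directory, and only afterwards is it moved ("Committing ... as ...") into the live store directory. What follows is a minimal illustrative sketch of that general pattern using the standard Hadoop FileSystem API; it is not HBase's actual flush code, and the paths, class name, and helper name are assumptions made only for the example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class TmpThenCommitSketch {
    // Sketch only: write new data under a .tmp directory, then rename it into the
    // live store directory so readers never observe a partially written file.
    public static void flushAndCommit(Configuration conf, byte[] serializedCells) throws IOException {
        FileSystem fs = FileSystem.get(conf);

        // 1. Write the flushed data under the region's .tmp directory first
        //    (mirrors the "Flushed memstore data ... to=.../.tmp/info/..." record).
        Path tmpFile = new Path("/data/default/ExampleTable/region/.tmp/info/example-hfile");
        try (FSDataOutputStream out = fs.create(tmpFile)) {
            out.write(serializedCells);
        }

        // 2. Commit by renaming into the live store directory
        //    (mirrors the "Committing ... as ..." and "Added ..." records).
        Path liveFile = new Path("/data/default/ExampleTable/region/info/example-hfile");
        if (!fs.rename(tmpFile, liveFile)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + liveFile);
        }
    }
}
```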
2024-11-19T08:49:30,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:30,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T08:49:30,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/c2f148530ef541a48510ba76a2fe7202 is 1080, key is row0072/info:/1732006170052/Put/seqid=0 2024-11-19T08:49:30,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741854_1030 (size=18987) 2024-11-19T08:49:30,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741854_1030 (size=18987) 2024-11-19T08:49:30,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/c2f148530ef541a48510ba76a2fe7202 2024-11-19T08:49:30,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/c2f148530ef541a48510ba76a2fe7202 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/c2f148530ef541a48510ba76a2fe7202 2024-11-19T08:49:30,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/c2f148530ef541a48510ba76a2fe7202, entries=13, sequenceid=112, filesize=18.5 K 2024-11-19T08:49:30,107 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 6d1ec013cc486e1d83824c31ac9dff5c in 27ms, sequenceid=112, compaction requested=true 2024-11-19T08:49:30,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:30,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:30,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:30,107 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:49:30,108 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39756 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-19T08:49:30,108 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:30,108 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6d1ec013cc486e1d83824c31ac9dff5c/info in TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:30,109 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/792d261b68ee4547b50e945de3663630, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e481b2e614b2489f93761280dc684a3f, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/c2f148530ef541a48510ba76a2fe7202] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=38.8 K 2024-11-19T08:49:30,109 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 792d261b68ee4547b50e945de3663630, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732006147954 2024-11-19T08:49:30,109 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting e481b2e614b2489f93761280dc684a3f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732006170039 2024-11-19T08:49:30,110 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting c2f148530ef541a48510ba76a2fe7202, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732006170052 2024-11-19T08:49:30,121 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#68 average throughput is 23.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:30,121 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/6840bfdd1ab1471299829f1514163c1a is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:30,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741855_1031 (size=29932) 2024-11-19T08:49:30,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741855_1031 (size=29932) 2024-11-19T08:49:30,132 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/6840bfdd1ab1471299829f1514163c1a as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6840bfdd1ab1471299829f1514163c1a 2024-11-19T08:49:30,138 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into 6840bfdd1ab1471299829f1514163c1a(size=29.2 K), total size for store is 29.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:30,138 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:30,138 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=13, startTime=1732006170107; duration=0sec 2024-11-19T08:49:30,138 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:30,138 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:30,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:30,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:30,447 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-19T08:49:31,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:31,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:32,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T08:49:32,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/6658d0c900704848ab6c01ec4c34351b is 1080, key is row0085/info:/1732006170081/Put/seqid=0 2024-11-19T08:49:32,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741856_1032 (size=18988) 2024-11-19T08:49:32,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741856_1032 (size=18988) 2024-11-19T08:49:32,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/6658d0c900704848ab6c01ec4c34351b 2024-11-19T08:49:32,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/6658d0c900704848ab6c01ec4c34351b as 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6658d0c900704848ab6c01ec4c34351b 2024-11-19T08:49:32,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6658d0c900704848ab6c01ec4c34351b, entries=13, sequenceid=129, filesize=18.5 K 2024-11-19T08:49:32,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for 6d1ec013cc486e1d83824c31ac9dff5c in 34ms, sequenceid=129, compaction requested=false 2024-11-19T08:49:32,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:32,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:32,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-19T08:49:32,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/fd60410bbe2b41018ec005bde0c58d85 is 1080, key is row0098/info:/1732006172110/Put/seqid=0 2024-11-19T08:49:32,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741857_1033 (size=23316) 2024-11-19T08:49:32,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741857_1033 (size=23316) 2024-11-19T08:49:32,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=149 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/fd60410bbe2b41018ec005bde0c58d85 2024-11-19T08:49:32,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/fd60410bbe2b41018ec005bde0c58d85 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/fd60410bbe2b41018ec005bde0c58d85 2024-11-19T08:49:32,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/fd60410bbe2b41018ec005bde0c58d85, entries=17, sequenceid=149, filesize=22.8 K 2024-11-19T08:49:32,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=9.46 KB/9684 for 6d1ec013cc486e1d83824c31ac9dff5c in 43ms, sequenceid=149, compaction requested=true 2024-11-19T08:49:32,188 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:32,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:32,188 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:49:32,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:32,189 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 72236 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:49:32,189 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:32,189 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6d1ec013cc486e1d83824c31ac9dff5c/info in TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:32,190 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6840bfdd1ab1471299829f1514163c1a, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6658d0c900704848ab6c01ec4c34351b, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/fd60410bbe2b41018ec005bde0c58d85] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=70.5 K 2024-11-19T08:49:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:32,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-19T08:49:32,190 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6840bfdd1ab1471299829f1514163c1a, keycount=23, bloomtype=ROW, size=29.2 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732006147954 2024-11-19T08:49:32,191 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6658d0c900704848ab6c01ec4c34351b, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732006170081 2024-11-19T08:49:32,191 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting fd60410bbe2b41018ec005bde0c58d85, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1732006172110 2024-11-19T08:49:32,197 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/08b171a0c1224092b182f4792e532107 is 1080, key is row0115/info:/1732006172145/Put/seqid=0 2024-11-19T08:49:32,213 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#72 average throughput is 27.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:32,214 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7f9bf188403247e8b202aacd2c53245e is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:32,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741858_1034 (size=15750) 2024-11-19T08:49:32,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741858_1034 (size=15750) 2024-11-19T08:49:32,222 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/08b171a0c1224092b182f4792e532107 2024-11-19T08:49:32,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741859_1035 (size=62466) 2024-11-19T08:49:32,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741859_1035 (size=62466) 2024-11-19T08:49:32,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/08b171a0c1224092b182f4792e532107 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/08b171a0c1224092b182f4792e532107 2024-11-19T08:49:32,233 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7f9bf188403247e8b202aacd2c53245e as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7f9bf188403247e8b202aacd2c53245e 2024-11-19T08:49:32,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/08b171a0c1224092b182f4792e532107, entries=10, sequenceid=162, filesize=15.4 K 2024-11-19T08:49:32,236 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=4.20 KB/4304 for 6d1ec013cc486e1d83824c31ac9dff5c in 46ms, sequenceid=162, compaction requested=false 2024-11-19T08:49:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:32,241 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into 7f9bf188403247e8b202aacd2c53245e(size=61.0 K), total size for store is 76.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:32,241 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:32,241 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=13, startTime=1732006172188; duration=0sec 2024-11-19T08:49:32,241 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:32,241 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:32,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:32,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:33,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:33,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:34,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:34,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:49:34,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/12e3407c9e5f48478ac87aa5a6eedfca is 1080, key is row0125/info:/1732006172192/Put/seqid=0 2024-11-19T08:49:34,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741860_1036 (size=12516) 2024-11-19T08:49:34,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741860_1036 (size=12516) 2024-11-19T08:49:34,222 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/12e3407c9e5f48478ac87aa5a6eedfca 2024-11-19T08:49:34,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/12e3407c9e5f48478ac87aa5a6eedfca as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/12e3407c9e5f48478ac87aa5a6eedfca 2024-11-19T08:49:34,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/12e3407c9e5f48478ac87aa5a6eedfca, entries=7, sequenceid=173, filesize=12.2 K 2024-11-19T08:49:34,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 6d1ec013cc486e1d83824c31ac9dff5c in 28ms, sequenceid=173, compaction requested=true 2024-11-19T08:49:34,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:34,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:34,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:34,235 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:49:34,236 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 90732 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:49:34,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:34,236 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:34,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-19T08:49:34,236 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6d1ec013cc486e1d83824c31ac9dff5c/info in TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:34,236 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7f9bf188403247e8b202aacd2c53245e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/08b171a0c1224092b182f4792e532107, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/12e3407c9e5f48478ac87aa5a6eedfca] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=88.6 K 2024-11-19T08:49:34,237 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7f9bf188403247e8b202aacd2c53245e, keycount=53, bloomtype=ROW, size=61.0 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1732006147954 2024-11-19T08:49:34,237 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08b171a0c1224092b182f4792e532107, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732006172145 2024-11-19T08:49:34,238 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 12e3407c9e5f48478ac87aa5a6eedfca, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732006172192 2024-11-19T08:49:34,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/dd253c29253e4b36a10fa473a0b1f91c is 1080, key is row0132/info:/1732006174208/Put/seqid=0 2024-11-19T08:49:34,250 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#75 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:34,251 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/e645688dcea3485e96767f70a8b7b9e0 is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:34,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741861_1037 (size=20078) 2024-11-19T08:49:34,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741861_1037 (size=20078) 2024-11-19T08:49:34,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/dd253c29253e4b36a10fa473a0b1f91c 2024-11-19T08:49:34,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:34,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:34,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/dd253c29253e4b36a10fa473a0b1f91c as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/dd253c29253e4b36a10fa473a0b1f91c 2024-11-19T08:49:34,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741862_1038 (size=80967) 2024-11-19T08:49:34,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741862_1038 (size=80967) 2024-11-19T08:49:34,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/dd253c29253e4b36a10fa473a0b1f91c, entries=14, sequenceid=190, filesize=19.6 K 2024-11-19T08:49:34,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 6d1ec013cc486e1d83824c31ac9dff5c in 33ms, sequenceid=190, compaction requested=false 2024-11-19T08:49:34,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:34,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:34,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-19T08:49:34,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/85902be5dfe7462caa1b5b773b32170e is 1080, key is row0146/info:/1732006174238/Put/seqid=0 2024-11-19T08:49:34,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741863_1039 (size=20078) 2024-11-19T08:49:34,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741863_1039 (size=20078) 2024-11-19T08:49:34,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/85902be5dfe7462caa1b5b773b32170e 2024-11-19T08:49:34,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/85902be5dfe7462caa1b5b773b32170e as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/85902be5dfe7462caa1b5b773b32170e 
2024-11-19T08:49:34,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/85902be5dfe7462caa1b5b773b32170e, entries=14, sequenceid=207, filesize=19.6 K 2024-11-19T08:49:34,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 6d1ec013cc486e1d83824c31ac9dff5c in 22ms, sequenceid=207, compaction requested=false 2024-11-19T08:49:34,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:34,675 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/e645688dcea3485e96767f70a8b7b9e0 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e645688dcea3485e96767f70a8b7b9e0 2024-11-19T08:49:34,683 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into e645688dcea3485e96767f70a8b7b9e0(size=79.1 K), total size for store is 118.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:34,683 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:34,683 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=13, startTime=1732006174235; duration=0sec 2024-11-19T08:49:34,683 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:34,683 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:35,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:35,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:36,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:36,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:36,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:36,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:49:36,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/aaf626a1ecd54cf68968cd39acba34ad is 1080, key is row0160/info:/1732006174273/Put/seqid=0 2024-11-19T08:49:36,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741864_1040 (size=12516) 2024-11-19T08:49:36,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741864_1040 (size=12516) 2024-11-19T08:49:36,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/aaf626a1ecd54cf68968cd39acba34ad 2024-11-19T08:49:36,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/aaf626a1ecd54cf68968cd39acba34ad as 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/aaf626a1ecd54cf68968cd39acba34ad 2024-11-19T08:49:36,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/aaf626a1ecd54cf68968cd39acba34ad, entries=7, sequenceid=218, filesize=12.2 K 2024-11-19T08:49:36,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 6d1ec013cc486e1d83824c31ac9dff5c in 23ms, sequenceid=218, compaction requested=true 2024-11-19T08:49:36,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:36,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:36,312 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T08:49:36,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:36,313 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133639 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-19T08:49:36,313 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:36,313 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6d1ec013cc486e1d83824c31ac9dff5c/info in TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 
2024-11-19T08:49:36,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:36,313 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e645688dcea3485e96767f70a8b7b9e0, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/dd253c29253e4b36a10fa473a0b1f91c, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/85902be5dfe7462caa1b5b773b32170e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/aaf626a1ecd54cf68968cd39acba34ad] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=130.5 K 2024-11-19T08:49:36,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T08:49:36,314 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting e645688dcea3485e96767f70a8b7b9e0, keycount=70, bloomtype=ROW, size=79.1 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732006147954 2024-11-19T08:49:36,314 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting dd253c29253e4b36a10fa473a0b1f91c, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1732006174208 2024-11-19T08:49:36,315 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 85902be5dfe7462caa1b5b773b32170e, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732006174238 2024-11-19T08:49:36,315 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting aaf626a1ecd54cf68968cd39acba34ad, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732006174273 2024-11-19T08:49:36,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7311b4e2252045e0af14a07fb8b6e55e is 1080, key is row0167/info:/1732006176290/Put/seqid=0 2024-11-19T08:49:36,346 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#79 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:36,347 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/37d0a48786dd496c82ca116517dffb7f is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:36,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741865_1041 (size=17906) 2024-11-19T08:49:36,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741865_1041 (size=17906) 2024-11-19T08:49:36,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7311b4e2252045e0af14a07fb8b6e55e 2024-11-19T08:49:36,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741866_1042 (size=118841) 2024-11-19T08:49:36,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741866_1042 (size=118841) 2024-11-19T08:49:36,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7311b4e2252045e0af14a07fb8b6e55e as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7311b4e2252045e0af14a07fb8b6e55e 2024-11-19T08:49:36,358 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/37d0a48786dd496c82ca116517dffb7f as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/37d0a48786dd496c82ca116517dffb7f 2024-11-19T08:49:36,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7311b4e2252045e0af14a07fb8b6e55e, entries=12, sequenceid=233, filesize=17.5 K 2024-11-19T08:49:36,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 6d1ec013cc486e1d83824c31ac9dff5c in 49ms, sequenceid=233, compaction requested=false 2024-11-19T08:49:36,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:36,364 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into 
37d0a48786dd496c82ca116517dffb7f(size=116.1 K), total size for store is 133.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:36,364 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:36,364 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=12, startTime=1732006176312; duration=0sec 2024-11-19T08:49:36,364 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:36,364 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:37,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:37,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:38,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:38,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:38,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T08:49:38,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/4ec42a9d0f9b4af2839e4448f3349bbe is 1080, key is row0179/info:/1732006176315/Put/seqid=0 2024-11-19T08:49:38,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741867_1043 (size=21156) 2024-11-19T08:49:38,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741867_1043 (size=21156) 2024-11-19T08:49:38,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6d1ec013cc486e1d83824c31ac9dff5c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-19T08:49:38,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34592 deadline: 1732006188380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6d1ec013cc486e1d83824c31ac9dff5c, server=3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:38,381 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=86 , the old value is region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=86, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6d1ec013cc486e1d83824c31ac9dff5c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:49:38,381 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=86 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6d1ec013cc486e1d83824c31ac9dff5c, server=3ab37fa97a98,43395,1732006124464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T08:49:38,381 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., hostname=3ab37fa97a98,43395,1732006124464, seqNum=86 because the exception is null or not the one we care about 2024-11-19T08:49:38,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/4ec42a9d0f9b4af2839e4448f3349bbe 2024-11-19T08:49:38,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/4ec42a9d0f9b4af2839e4448f3349bbe as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/4ec42a9d0f9b4af2839e4448f3349bbe 2024-11-19T08:49:38,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/4ec42a9d0f9b4af2839e4448f3349bbe, entries=15, sequenceid=252, filesize=20.7 K 2024-11-19T08:49:38,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for 6d1ec013cc486e1d83824c31ac9dff5c in 430ms, sequenceid=252, compaction requested=true 2024-11-19T08:49:38,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:38,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:38,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:38,777 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:49:38,778 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157903 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:49:38,779 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:38,779 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
6d1ec013cc486e1d83824c31ac9dff5c/info in TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:38,779 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/37d0a48786dd496c82ca116517dffb7f, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7311b4e2252045e0af14a07fb8b6e55e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/4ec42a9d0f9b4af2839e4448f3349bbe] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=154.2 K 2024-11-19T08:49:38,779 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37d0a48786dd496c82ca116517dffb7f, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732006147954 2024-11-19T08:49:38,780 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7311b4e2252045e0af14a07fb8b6e55e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732006176290 2024-11-19T08:49:38,780 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ec42a9d0f9b4af2839e4448f3349bbe, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732006176315 2024-11-19T08:49:38,797 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#81 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:38,798 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/36f9d40130b44e5481364cada4cfd989 is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:38,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741868_1044 (size=148254) 2024-11-19T08:49:38,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741868_1044 (size=148254) 2024-11-19T08:49:38,807 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/36f9d40130b44e5481364cada4cfd989 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/36f9d40130b44e5481364cada4cfd989 2024-11-19T08:49:38,813 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into 36f9d40130b44e5481364cada4cfd989(size=144.8 K), total size for store is 144.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:38,813 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:38,813 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=13, startTime=1732006178777; duration=0sec 2024-11-19T08:49:38,813 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:38,813 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:39,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:39,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:40,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:40,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:41,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:41,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:42,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:42,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:43,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:43,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:43,282 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=66, reuseRatio=88.00% 2024-11-19T08:49:43,283 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-19T08:49:44,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:44,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:44,284 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T08:49:45,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:45,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:46,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:46,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:47,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:47,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:48,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:48,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:48,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T08:49:48,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7678abfa40fe479a821792c7e3f2ce00 is 1080, key is row0194/info:/1732006178349/Put/seqid=0 2024-11-19T08:49:48,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741869_1045 (size=21169) 2024-11-19T08:49:48,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741869_1045 (size=21169) 2024-11-19T08:49:48,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7678abfa40fe479a821792c7e3f2ce00 2024-11-19T08:49:48,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/7678abfa40fe479a821792c7e3f2ce00 as 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7678abfa40fe479a821792c7e3f2ce00 2024-11-19T08:49:48,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7678abfa40fe479a821792c7e3f2ce00, entries=15, sequenceid=271, filesize=20.7 K 2024-11-19T08:49:48,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 6d1ec013cc486e1d83824c31ac9dff5c in 30ms, sequenceid=271, compaction requested=false 2024-11-19T08:49:48,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:48,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:48,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T08:49:48,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/9550ed7319764bc5bf57b9ade2d3dd2e is 1080, key is row0209/info:/1732006188419/Put/seqid=0 2024-11-19T08:49:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741870_1046 (size=17918) 2024-11-19T08:49:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741870_1046 (size=17918) 2024-11-19T08:49:48,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/9550ed7319764bc5bf57b9ade2d3dd2e 2024-11-19T08:49:48,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/9550ed7319764bc5bf57b9ade2d3dd2e as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/9550ed7319764bc5bf57b9ade2d3dd2e 2024-11-19T08:49:48,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/9550ed7319764bc5bf57b9ade2d3dd2e, entries=12, sequenceid=286, filesize=17.5 K 2024-11-19T08:49:48,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=4.20 KB/4304 for 6d1ec013cc486e1d83824c31ac9dff5c in 23ms, sequenceid=286, compaction requested=true 2024-11-19T08:49:48,470 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:48,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:48,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:48,471 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:49:48,472 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 187341 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:49:48,472 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:48,472 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6d1ec013cc486e1d83824c31ac9dff5c/info in TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:48,472 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/36f9d40130b44e5481364cada4cfd989, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7678abfa40fe479a821792c7e3f2ce00, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/9550ed7319764bc5bf57b9ade2d3dd2e] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=183.0 K 2024-11-19T08:49:48,472 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 36f9d40130b44e5481364cada4cfd989, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732006147954 2024-11-19T08:49:48,473 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7678abfa40fe479a821792c7e3f2ce00, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732006178349 2024-11-19T08:49:48,473 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9550ed7319764bc5bf57b9ade2d3dd2e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732006188419 2024-11-19T08:49:48,484 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#84 average throughput is 54.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:48,484 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/a49105e497fb4d35ac589998993e7850 is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:48,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741871_1047 (size=177495) 2024-11-19T08:49:48,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741871_1047 (size=177495) 2024-11-19T08:49:48,494 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/a49105e497fb4d35ac589998993e7850 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/a49105e497fb4d35ac589998993e7850 2024-11-19T08:49:48,500 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into a49105e497fb4d35ac589998993e7850(size=173.3 K), total size for store is 173.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T08:49:48,500 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:48,500 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=13, startTime=1732006188470; duration=0sec 2024-11-19T08:49:48,501 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:48,501 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:49,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:49,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:50,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:50,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:50,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:50,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T08:49:50,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/ac441e7447be4ad9b16c0ce69525601d is 1080, key is row0221/info:/1732006188449/Put/seqid=0 2024-11-19T08:49:50,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741872_1048 (size=12523) 2024-11-19T08:49:50,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741872_1048 (size=12523) 2024-11-19T08:49:50,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/ac441e7447be4ad9b16c0ce69525601d 2024-11-19T08:49:50,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/ac441e7447be4ad9b16c0ce69525601d as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ac441e7447be4ad9b16c0ce69525601d 2024-11-19T08:49:50,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ac441e7447be4ad9b16c0ce69525601d, entries=7, sequenceid=297, filesize=12.2 K 2024-11-19T08:49:50,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 6d1ec013cc486e1d83824c31ac9dff5c in 27ms, sequenceid=297, compaction requested=false 2024-11-19T08:49:50,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:50,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T08:49:50,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/af9aa69e73414199b464d45df0f09b30 is 1080, key is row0228/info:/1732006190465/Put/seqid=0 2024-11-19T08:49:50,501 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741873_1049 (size=17918) 2024-11-19T08:49:50,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741873_1049 (size=17918) 2024-11-19T08:49:50,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/af9aa69e73414199b464d45df0f09b30 2024-11-19T08:49:50,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/af9aa69e73414199b464d45df0f09b30 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/af9aa69e73414199b464d45df0f09b30 2024-11-19T08:49:50,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/af9aa69e73414199b464d45df0f09b30, entries=12, sequenceid=312, filesize=17.5 K 2024-11-19T08:49:50,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 6d1ec013cc486e1d83824c31ac9dff5c in 23ms, sequenceid=312, compaction requested=true 2024-11-19T08:49:50,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:50,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6d1ec013cc486e1d83824c31ac9dff5c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T08:49:50,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:50,514 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T08:49:50,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43395 {}] regionserver.HRegion(8855): Flush requested on 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:50,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T08:49:50,515 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 207936 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T08:49:50,515 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1541): 6d1ec013cc486e1d83824c31ac9dff5c/info is initiating minor compaction (all files) 2024-11-19T08:49:50,515 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6d1ec013cc486e1d83824c31ac9dff5c/info in 
TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:50,515 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/a49105e497fb4d35ac589998993e7850, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ac441e7447be4ad9b16c0ce69525601d, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/af9aa69e73414199b464d45df0f09b30] into tmpdir=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp, totalSize=203.1 K 2024-11-19T08:49:50,515 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting a49105e497fb4d35ac589998993e7850, keycount=159, bloomtype=ROW, size=173.3 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732006147954 2024-11-19T08:49:50,516 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting ac441e7447be4ad9b16c0ce69525601d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732006188449 2024-11-19T08:49:50,516 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] compactions.Compactor(225): Compacting af9aa69e73414199b464d45df0f09b30, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732006190465 2024-11-19T08:49:50,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/28e8225b982b4b7aa97de0d41b509487 is 1080, key is row0240/info:/1732006190492/Put/seqid=0 2024-11-19T08:49:50,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741874_1050 (size=17918) 2024-11-19T08:49:50,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741874_1050 (size=17918) 2024-11-19T08:49:50,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/28e8225b982b4b7aa97de0d41b509487 2024-11-19T08:49:50,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/28e8225b982b4b7aa97de0d41b509487 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/28e8225b982b4b7aa97de0d41b509487 2024-11-19T08:49:50,530 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6d1ec013cc486e1d83824c31ac9dff5c#info#compaction#88 average throughput is 45.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T08:49:50,531 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/85134f8740504f0889605c38977f681b is 1080, key is row0062/info:/1732006147954/Put/seqid=0 2024-11-19T08:49:50,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741875_1051 (size=198102) 2024-11-19T08:49:50,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741875_1051 (size=198102) 2024-11-19T08:49:50,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/28e8225b982b4b7aa97de0d41b509487, entries=12, sequenceid=327, filesize=17.5 K 2024-11-19T08:49:50,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for 6d1ec013cc486e1d83824c31ac9dff5c in 23ms, sequenceid=327, compaction requested=false 2024-11-19T08:49:50,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:50,539 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/85134f8740504f0889605c38977f681b as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/85134f8740504f0889605c38977f681b 2024-11-19T08:49:50,544 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6d1ec013cc486e1d83824c31ac9dff5c/info of 6d1ec013cc486e1d83824c31ac9dff5c into 85134f8740504f0889605c38977f681b(size=193.5 K), total size for store is 211.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T08:49:50,544 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:50,544 INFO [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., storeName=6d1ec013cc486e1d83824c31ac9dff5c/info, priority=13, startTime=1732006190514; duration=0sec 2024-11-19T08:49:50,544 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T08:49:50,544 DEBUG [RS:0;3ab37fa97a98:43395-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6d1ec013cc486e1d83824c31ac9dff5c:info 2024-11-19T08:49:50,918 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T08:49:50,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:50,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T08:49:51,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:51,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:52,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:52,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:52,532 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-19T08:49:52,532 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C43395%2C1732006124464.1732006192532 2024-11-19T08:49:52,553 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,553 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,553 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,554 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,554 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,554 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006124993 with entries=313, filesize=308.47 KB; new WAL /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006192532 2024-11-19T08:49:52,555 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37747:37747),(127.0.0.1/127.0.0.1:33099:33099)] 2024-11-19T08:49:52,555 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006124993 is not closed yet, will try archiving it next time 2024-11-19T08:49:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741833_1009 (size=315885) 2024-11-19T08:49:52,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741833_1009 (size=315885) 2024-11-19T08:49:52,559 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e9a2694a1253a4604bd998ea0bb567ca: 2024-11-19T08:49:52,559 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 6d1ec013cc486e1d83824c31ac9dff5c 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-19T08:49:52,565 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/ff384dbed712447bafd763b2bc2fed63 is 1080, key is row0252/info:/1732006190516/Put/seqid=0 2024-11-19T08:49:52,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741877_1053 (size=10357) 2024-11-19T08:49:52,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741877_1053 (size=10357) 2024-11-19T08:49:52,572 INFO [Time-limited test 
{}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/ff384dbed712447bafd763b2bc2fed63 2024-11-19T08:49:52,577 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/.tmp/info/ff384dbed712447bafd763b2bc2fed63 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ff384dbed712447bafd763b2bc2fed63 2024-11-19T08:49:52,581 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ff384dbed712447bafd763b2bc2fed63, entries=5, sequenceid=336, filesize=10.1 K 2024-11-19T08:49:52,582 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 6d1ec013cc486e1d83824c31ac9dff5c in 23ms, sequenceid=336, compaction requested=true 2024-11-19T08:49:52,582 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6d1ec013cc486e1d83824c31ac9dff5c: 2024-11-19T08:49:52,582 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-19T08:49:52,586 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/info/0099e39675b2489cb18a4bae3c3240ed is 186, key is TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca./info:regioninfo/1732006158741/Put/seqid=0 2024-11-19T08:49:52,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741878_1054 (size=6153) 2024-11-19T08:49:52,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741878_1054 (size=6153) 2024-11-19T08:49:52,590 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/info/0099e39675b2489cb18a4bae3c3240ed 2024-11-19T08:49:52,594 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/.tmp/info/0099e39675b2489cb18a4bae3c3240ed as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/info/0099e39675b2489cb18a4bae3c3240ed 2024-11-19T08:49:52,599 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/info/0099e39675b2489cb18a4bae3c3240ed, entries=5, sequenceid=21, filesize=6.0 K 2024-11-19T08:49:52,599 INFO [Time-limited test {}] regionserver.HRegion(3140): 
Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 17ms, sequenceid=21, compaction requested=false 2024-11-19T08:49:52,599 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T08:49:52,600 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C43395%2C1732006124464.1732006192600 2024-11-19T08:49:52,604 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,604 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,604 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,605 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,605 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006192532 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006192600 2024-11-19T08:49:52,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741876_1052 (size=731) 2024-11-19T08:49:52,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741876_1052 (size=731) 2024-11-19T08:49:52,607 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33099:33099),(127.0.0.1/127.0.0.1:37747:37747)] 2024-11-19T08:49:52,607 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006192532 is not closed yet, will try archiving it next time 2024-11-19T08:49:52,607 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006124993 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/oldWALs/3ab37fa97a98%2C43395%2C1732006124464.1732006124993 2024-11-19T08:49:52,608 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T08:49:52,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T08:49:52,608 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
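The entries just above ("Finished flush of dataSize ..." followed by "Rolled WAL ... with entries=2, filesize=723 B; new WAL ...") are the flush-then-roll sequence this test exercises, after which the previous WAL is moved to the oldWALs directory ("Archiving ... to .../oldWALs/..."). The sketch below shows one way to trigger the same sequence from a client using only the public Admin API; it is an illustration, not the test's own code (the test drives flushes and rolls internally), and it assumes an HBase client version whose Admin interface exposes flush(TableName), getRegionServers(), and rollWALWriter(ServerName).

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForceFlushAndRoll {
  public static void main(String[] args) throws IOException {
    // Table name taken from the log above; connection settings come from the
    // hbase-site.xml on the classpath.
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush memstores to HFiles -> "Finished flush of dataSize ..." entries.
      admin.flush(table);
      // Roll the WAL on every region server -> "Rolled WAL ... new WAL ..." entries.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}
```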
2024-11-19T08:49:52,608 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:49:52,608 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:52,608 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/WALs/3ab37fa97a98,43395,1732006124464/3ab37fa97a98%2C43395%2C1732006124464.1732006192532 to 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/oldWALs/3ab37fa97a98%2C43395%2C1732006124464.1732006192532 2024-11-19T08:49:52,608 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:52,608 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T08:49:52,608 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T08:49:52,608 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1795744571, stopped=false 2024-11-19T08:49:52,609 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3ab37fa97a98,38369,1732006124300 2024-11-19T08:49:52,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:49:52,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:49:52,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:52,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:52,672 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:49:52,672 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T08:49:52,673 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:49:52,673 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:52,673 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:49:52,673 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:49:52,673 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,43395,1732006124464' ***** 2024-11-19T08:49:52,674 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:49:52,674 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:49:52,675 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:49:52,675 INFO [RS:0;3ab37fa97a98:43395 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T08:49:52,675 INFO [RS:0;3ab37fa97a98:43395 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(3091): Received CLOSE for e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(3091): Received CLOSE for 6d1ec013cc486e1d83824c31ac9dff5c 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3ab37fa97a98:43395. 2024-11-19T08:49:52,676 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e9a2694a1253a4604bd998ea0bb567ca, disabling compactions & flushes 2024-11-19T08:49:52,676 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 
2024-11-19T08:49:52,676 DEBUG [RS:0;3ab37fa97a98:43395 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:49:52,676 DEBUG [RS:0;3ab37fa97a98:43395 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:52,676 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 2024-11-19T08:49:52,676 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. after waiting 0 ms 2024-11-19T08:49:52,676 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:49:52,676 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T08:49:52,677 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T08:49:52,677 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-19T08:49:52,677 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1325): Online Regions={e9a2694a1253a4604bd998ea0bb567ca=TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca., 6d1ec013cc486e1d83824c31ac9dff5c=TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T08:49:52,677 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:49:52,677 DEBUG [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6d1ec013cc486e1d83824c31ac9dff5c, e9a2694a1253a4604bd998ea0bb567ca 2024-11-19T08:49:52,677 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:49:52,677 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:49:52,677 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:49:52,677 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:49:52,677 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c->hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2-bottom] to archive 2024-11-19T08:49:52,678 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T08:49:52,680 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:52,681 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3ab37fa97a98:38369 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-19T08:49:52,681 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-19T08:49:52,682 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-19T08:49:52,683 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:49:52,683 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:49:52,683 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006192677Running coprocessor pre-close hooks at 1732006192677Disabling compacts and flushes for region at 1732006192677Disabling writes for close at 1732006192677Writing region close event to WAL at 1732006192678 (+1 ms)Running coprocessor post-close hooks at 1732006192683 (+5 ms)Closed at 1732006192683 2024-11-19T08:49:52,683 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T08:49:52,685 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/e9a2694a1253a4604bd998ea0bb567ca/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-19T08:49:52,685 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 2024-11-19T08:49:52,685 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e9a2694a1253a4604bd998ea0bb567ca: Waiting for close lock at 1732006192676Running coprocessor pre-close hooks at 1732006192676Disabling compacts and flushes for region at 1732006192676Disabling writes for close at 1732006192676Writing region close event to WAL at 1732006192681 (+5 ms)Running coprocessor post-close hooks at 1732006192685 (+4 ms)Closed at 1732006192685 2024-11-19T08:49:52,685 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732006158071.e9a2694a1253a4604bd998ea0bb567ca. 
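In the region-close sequences above and below, compacted store files are not deleted but moved under the cluster's archive directory: each "Archived from FileableStoreFile ... to ..." entry maps .../data/<namespace>/<table>/<region>/<family>/<file> to the same relative location under .../archive/data/.... The snippet below is a minimal sketch of that observable path mapping only; it is not HBase's HFileArchiver, which additionally handles name collisions, retries, and the quota reporting whose failure is logged above (the RPC client to the master is already stopped at this point).

```java
public class ArchivePathDemo {
  // Mirror ".../data/..." to ".../archive/data/...", the mapping visible in the
  // "Archived from FileableStoreFile" entries.
  static String archiveLocationFor(String storeFilePath) {
    int i = storeFilePath.indexOf("/data/");
    if (i < 0) {
      throw new IllegalArgumentException("not a store file path: " + storeFilePath);
    }
    return storeFilePath.substring(0, i) + "/archive" + storeFilePath.substring(i);
  }

  public static void main(String[] args) {
    // A source path taken from the archive entries in this log.
    String src = "hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc"
        + "/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c"
        + "/info/792d261b68ee4547b50e945de3663630";
    System.out.println(archiveLocationFor(src));
  }
}
```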
2024-11-19T08:49:52,685 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6d1ec013cc486e1d83824c31ac9dff5c, disabling compactions & flushes 2024-11-19T08:49:52,685 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:52,685 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:52,685 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. after waiting 0 ms 2024-11-19T08:49:52,685 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:52,686 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c->hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/bc4de71ea654c058906436fa4960c66c/info/ceb166b1116b4055acfd88a50aaf1de2-top, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/792d261b68ee4547b50e945de3663630, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e481b2e614b2489f93761280dc684a3f, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6840bfdd1ab1471299829f1514163c1a, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/c2f148530ef541a48510ba76a2fe7202, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6658d0c900704848ab6c01ec4c34351b, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7f9bf188403247e8b202aacd2c53245e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/fd60410bbe2b41018ec005bde0c58d85, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/08b171a0c1224092b182f4792e532107, 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e645688dcea3485e96767f70a8b7b9e0, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/12e3407c9e5f48478ac87aa5a6eedfca, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/dd253c29253e4b36a10fa473a0b1f91c, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/85902be5dfe7462caa1b5b773b32170e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/37d0a48786dd496c82ca116517dffb7f, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/aaf626a1ecd54cf68968cd39acba34ad, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7311b4e2252045e0af14a07fb8b6e55e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/36f9d40130b44e5481364cada4cfd989, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/4ec42a9d0f9b4af2839e4448f3349bbe, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7678abfa40fe479a821792c7e3f2ce00, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/a49105e497fb4d35ac589998993e7850, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/9550ed7319764bc5bf57b9ade2d3dd2e, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ac441e7447be4ad9b16c0ce69525601d, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/af9aa69e73414199b464d45df0f09b30] to archive 2024-11-19T08:49:52,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T08:49:52,689 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ceb166b1116b4055acfd88a50aaf1de2.bc4de71ea654c058906436fa4960c66c 2024-11-19T08:49:52,690 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/792d261b68ee4547b50e945de3663630 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/792d261b68ee4547b50e945de3663630 2024-11-19T08:49:52,691 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e481b2e614b2489f93761280dc684a3f to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e481b2e614b2489f93761280dc684a3f 2024-11-19T08:49:52,693 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6840bfdd1ab1471299829f1514163c1a to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6840bfdd1ab1471299829f1514163c1a 2024-11-19T08:49:52,694 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/c2f148530ef541a48510ba76a2fe7202 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/c2f148530ef541a48510ba76a2fe7202 2024-11-19T08:49:52,695 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6658d0c900704848ab6c01ec4c34351b to 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/6658d0c900704848ab6c01ec4c34351b 2024-11-19T08:49:52,696 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7f9bf188403247e8b202aacd2c53245e to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7f9bf188403247e8b202aacd2c53245e 2024-11-19T08:49:52,697 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/fd60410bbe2b41018ec005bde0c58d85 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/fd60410bbe2b41018ec005bde0c58d85 2024-11-19T08:49:52,698 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/08b171a0c1224092b182f4792e532107 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/08b171a0c1224092b182f4792e532107 2024-11-19T08:49:52,699 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e645688dcea3485e96767f70a8b7b9e0 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/e645688dcea3485e96767f70a8b7b9e0 2024-11-19T08:49:52,700 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/12e3407c9e5f48478ac87aa5a6eedfca to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/12e3407c9e5f48478ac87aa5a6eedfca 2024-11-19T08:49:52,701 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/dd253c29253e4b36a10fa473a0b1f91c to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/dd253c29253e4b36a10fa473a0b1f91c 2024-11-19T08:49:52,703 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/85902be5dfe7462caa1b5b773b32170e to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/85902be5dfe7462caa1b5b773b32170e 2024-11-19T08:49:52,704 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/37d0a48786dd496c82ca116517dffb7f to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/37d0a48786dd496c82ca116517dffb7f 2024-11-19T08:49:52,705 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/aaf626a1ecd54cf68968cd39acba34ad to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/aaf626a1ecd54cf68968cd39acba34ad 2024-11-19T08:49:52,706 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7311b4e2252045e0af14a07fb8b6e55e to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7311b4e2252045e0af14a07fb8b6e55e 2024-11-19T08:49:52,708 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/36f9d40130b44e5481364cada4cfd989 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/36f9d40130b44e5481364cada4cfd989 2024-11-19T08:49:52,710 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/4ec42a9d0f9b4af2839e4448f3349bbe to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/4ec42a9d0f9b4af2839e4448f3349bbe 2024-11-19T08:49:52,711 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7678abfa40fe479a821792c7e3f2ce00 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/7678abfa40fe479a821792c7e3f2ce00 2024-11-19T08:49:52,712 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/a49105e497fb4d35ac589998993e7850 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/a49105e497fb4d35ac589998993e7850 2024-11-19T08:49:52,713 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/9550ed7319764bc5bf57b9ade2d3dd2e to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/9550ed7319764bc5bf57b9ade2d3dd2e 2024-11-19T08:49:52,714 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ac441e7447be4ad9b16c0ce69525601d to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/ac441e7447be4ad9b16c0ce69525601d 2024-11-19T08:49:52,715 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/af9aa69e73414199b464d45df0f09b30 to hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/archive/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/af9aa69e73414199b464d45df0f09b30 2024-11-19T08:49:52,715 WARN 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [792d261b68ee4547b50e945de3663630=8260, e481b2e614b2489f93761280dc684a3f=12509, 6840bfdd1ab1471299829f1514163c1a=29932, c2f148530ef541a48510ba76a2fe7202=18987, 6658d0c900704848ab6c01ec4c34351b=18988, 7f9bf188403247e8b202aacd2c53245e=62466, fd60410bbe2b41018ec005bde0c58d85=23316, 08b171a0c1224092b182f4792e532107=15750, e645688dcea3485e96767f70a8b7b9e0=80967, 12e3407c9e5f48478ac87aa5a6eedfca=12516, dd253c29253e4b36a10fa473a0b1f91c=20078, 85902be5dfe7462caa1b5b773b32170e=20078, 37d0a48786dd496c82ca116517dffb7f=118841, aaf626a1ecd54cf68968cd39acba34ad=12516, 7311b4e2252045e0af14a07fb8b6e55e=17906, 36f9d40130b44e5481364cada4cfd989=148254, 4ec42a9d0f9b4af2839e4448f3349bbe=21156, 7678abfa40fe479a821792c7e3f2ce00=21169, a49105e497fb4d35ac589998993e7850=177495, 9550ed7319764bc5bf57b9ade2d3dd2e=17918, ac441e7447be4ad9b16c0ce69525601d=12523, af9aa69e73414199b464d45df0f09b30=17918] 2024-11-19T08:49:52,720 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=85 2024-11-19T08:49:52,720 INFO [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:52,721 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6d1ec013cc486e1d83824c31ac9dff5c: Waiting for close lock at 1732006192685Running coprocessor pre-close hooks at 1732006192685Disabling compacts and flushes for region at 1732006192685Disabling writes for close at 1732006192685Writing region close event to WAL at 1732006192716 (+31 ms)Running coprocessor post-close hooks at 1732006192720 (+4 ms)Closed at 1732006192720 2024-11-19T08:49:52,721 DEBUG [RS_CLOSE_REGION-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732006158071.6d1ec013cc486e1d83824c31ac9dff5c. 2024-11-19T08:49:52,859 INFO [regionserver/3ab37fa97a98:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:49:52,861 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T08:49:52,861 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T08:49:52,877 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,43395,1732006124464; all regions closed. 
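
The archival entries above show each store file being moved from the region's data directory to a parallel location under the root's archive tree. Below is a minimal, hypothetical sketch (class and method names are illustrative, not the actual backup.HFileArchiver code) of how such a data-to-archive path mapping can be derived with the Hadoop Path API; the root URI and layout are taken from the log lines above.

```java
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  /**
   * Maps a store file under <root>/data/<ns>/<table>/<region>/<family>/<file>
   * to the parallel location under <root>/archive/data/... , mirroring the
   * moves logged by backup.HFileArchiver above. Illustrative sketch only.
   */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toString();
    String file = storeFile.toString();
    String prefix = root + "/data/";
    if (!file.startsWith(prefix)) {
      throw new IllegalArgumentException("Not under the data directory: " + storeFile);
    }
    // The archive tree mirrors the data tree under <root>/archive/data/...
    return new Path(root + "/archive/data/" + file.substring(prefix.length()));
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc");
    Path storeFile = new Path(root,
        "data/default/TestLogRolling-testLogRolling/6d1ec013cc486e1d83824c31ac9dff5c/info/dd253c29253e4b36a10fa473a0b1f91c");
    // Prints the same archive location that appears in the first archival entry above.
    System.out.println(toArchivePath(root, storeFile));
  }
}
```
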
2024-11-19T08:49:52,878 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,878 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,878 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,878 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,878 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741834_1010 (size=8107) 2024-11-19T08:49:52,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741834_1010 (size=8107) 2024-11-19T08:49:52,884 DEBUG [RS:0;3ab37fa97a98:43395 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/oldWALs 2024-11-19T08:49:52,884 INFO [RS:0;3ab37fa97a98:43395 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C43395%2C1732006124464.meta:.meta(num 1732006125436) 2024-11-19T08:49:52,884 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,885 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,885 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,885 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:52,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741879_1055 (size=778) 2024-11-19T08:49:52,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741879_1055 (size=778) 2024-11-19T08:49:52,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/oldWALs 2024-11-19T08:49:52,889 INFO [RS:0;3ab37fa97a98:43395 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C43395%2C1732006124464:(num 1732006192600) 2024-11-19T08:49:52,889 DEBUG [RS:0;3ab37fa97a98:43395 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:52,889 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:49:52,890 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:49:52,890 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T08:49:52,890 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:49:52,890 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T08:49:52,890 INFO [RS:0;3ab37fa97a98:43395 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43395 2024-11-19T08:49:52,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:49:52,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,43395,1732006124464 2024-11-19T08:49:52,921 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:49:52,922 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,43395,1732006124464] 2024-11-19T08:49:52,938 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,43395,1732006124464 already deleted, retry=false 2024-11-19T08:49:52,938 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,43395,1732006124464 expired; onlineServers=0 2024-11-19T08:49:52,938 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3ab37fa97a98,38369,1732006124300' ***** 2024-11-19T08:49:52,938 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T08:49:52,938 INFO [M:0;3ab37fa97a98:38369 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:49:52,938 INFO [M:0;3ab37fa97a98:38369 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:49:52,938 DEBUG [M:0;3ab37fa97a98:38369 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T08:49:52,938 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
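
The ZKWatcher/RegionServerTracker entries above react to the region server's ephemeral znode disappearing under /hbase/rs. The sketch below uses the plain Apache ZooKeeper client API (not HBase's ZKWatcher) to show how a children watch on that parent node surfaces such departures; the quorum address is taken from the log, everything else is illustrative.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import java.util.List;

public class RsTrackerSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address from the log; session timeout is an arbitrary example value.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61360", 30_000, event -> { });

    Watcher childWatcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // NodeChildrenChanged fires when an ephemeral /hbase/rs/<server> node
        // disappears, e.g. because the region server closed its session.
        if (event.getType() == Event.EventType.NodeChildrenChanged) {
          System.out.println("Children of " + event.getPath() + " changed; re-listing");
        }
      }
    };

    // Register the watch and read the current membership list.
    List<String> servers = zk.getChildren("/hbase/rs", childWatcher);
    System.out.println("Live region servers: " + servers);
  }
}
```
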
2024-11-19T08:49:52,938 DEBUG [M:0;3ab37fa97a98:38369 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T08:49:52,938 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006124782 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006124782,5,FailOnTimeoutGroup] 2024-11-19T08:49:52,938 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006124782 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006124782,5,FailOnTimeoutGroup] 2024-11-19T08:49:52,938 INFO [M:0;3ab37fa97a98:38369 {}] hbase.ChoreService(370): Chore service for: master/3ab37fa97a98:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T08:49:52,938 INFO [M:0;3ab37fa97a98:38369 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:49:52,938 DEBUG [M:0;3ab37fa97a98:38369 {}] master.HMaster(1795): Stopping service threads 2024-11-19T08:49:52,939 INFO [M:0;3ab37fa97a98:38369 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T08:49:52,939 INFO [M:0;3ab37fa97a98:38369 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:49:52,939 INFO [M:0;3ab37fa97a98:38369 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T08:49:52,939 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T08:49:52,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T08:49:52,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:52,946 DEBUG [M:0;3ab37fa97a98:38369 {}] zookeeper.ZKUtil(347): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T08:49:52,946 WARN [M:0;3ab37fa97a98:38369 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T08:49:52,947 INFO [M:0;3ab37fa97a98:38369 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/.lastflushedseqids 2024-11-19T08:49:52,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741880_1056 (size=228) 2024-11-19T08:49:52,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741880_1056 (size=228) 2024-11-19T08:49:52,954 INFO [M:0;3ab37fa97a98:38369 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T08:49:52,955 INFO [M:0;3ab37fa97a98:38369 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T08:49:52,955 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:49:52,955 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:49:52,955 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:49:52,955 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:49:52,955 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:49:52,955 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-19T08:49:52,977 DEBUG [M:0;3ab37fa97a98:38369 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/287973b571e8456ea8ca11e6d398f112 is 82, key is hbase:meta,,1/info:regioninfo/1732006125465/Put/seqid=0 2024-11-19T08:49:52,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741881_1057 (size=5672) 2024-11-19T08:49:52,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741881_1057 (size=5672) 2024-11-19T08:49:52,982 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/287973b571e8456ea8ca11e6d398f112 2024-11-19T08:49:53,002 DEBUG [M:0;3ab37fa97a98:38369 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e91c9941684c41d4bfb01ec4c1f62fdc is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732006125982/Put/seqid=0 2024-11-19T08:49:53,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741882_1058 (size=7090) 2024-11-19T08:49:53,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741882_1058 (size=7090) 2024-11-19T08:49:53,008 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e91c9941684c41d4bfb01ec4c1f62fdc 2024-11-19T08:49:53,015 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e91c9941684c41d4bfb01ec4c1f62fdc 2024-11-19T08:49:53,035 INFO [RS:0;3ab37fa97a98:43395 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:49:53,035 INFO [RS:0;3ab37fa97a98:43395 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,43395,1732006124464; zookeeper connection 
closed. 2024-11-19T08:49:53,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:49:53,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43395-0x1015391adc30001, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:49:53,035 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c4fdf17 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c4fdf17 2024-11-19T08:49:53,035 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T08:49:53,048 DEBUG [M:0;3ab37fa97a98:38369 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6b14d865e9b149628128ca5e962085e6 is 69, key is 3ab37fa97a98,43395,1732006124464/rs:state/1732006124838/Put/seqid=0 2024-11-19T08:49:53,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741883_1059 (size=5156) 2024-11-19T08:49:53,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741883_1059 (size=5156) 2024-11-19T08:49:53,060 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6b14d865e9b149628128ca5e962085e6 2024-11-19T08:49:53,080 DEBUG [M:0;3ab37fa97a98:38369 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3dc6579a397d410cba852a16e7f89a93 is 52, key is load_balancer_on/state:d/1732006125605/Put/seqid=0 2024-11-19T08:49:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741884_1060 (size=5056) 2024-11-19T08:49:53,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741884_1060 (size=5056) 2024-11-19T08:49:53,085 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3dc6579a397d410cba852a16e7f89a93 2024-11-19T08:49:53,091 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/287973b571e8456ea8ca11e6d398f112 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/287973b571e8456ea8ca11e6d398f112 2024-11-19T08:49:53,095 INFO [M:0;3ab37fa97a98:38369 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/287973b571e8456ea8ca11e6d398f112, entries=8, sequenceid=125, filesize=5.5 K 2024-11-19T08:49:53,096 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e91c9941684c41d4bfb01ec4c1f62fdc as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e91c9941684c41d4bfb01ec4c1f62fdc 2024-11-19T08:49:53,100 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e91c9941684c41d4bfb01ec4c1f62fdc 2024-11-19T08:49:53,100 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e91c9941684c41d4bfb01ec4c1f62fdc, entries=13, sequenceid=125, filesize=6.9 K 2024-11-19T08:49:53,102 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6b14d865e9b149628128ca5e962085e6 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6b14d865e9b149628128ca5e962085e6 2024-11-19T08:49:53,106 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6b14d865e9b149628128ca5e962085e6, entries=1, sequenceid=125, filesize=5.0 K 2024-11-19T08:49:53,107 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3dc6579a397d410cba852a16e7f89a93 as hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3dc6579a397d410cba852a16e7f89a93 2024-11-19T08:49:53,111 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33937/user/jenkins/test-data/a15d9468-0b03-9162-b3fd-c30348c428bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3dc6579a397d410cba852a16e7f89a93, entries=1, sequenceid=125, filesize=4.9 K 2024-11-19T08:49:53,112 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=125, compaction requested=false 2024-11-19T08:49:53,113 INFO [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
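
The "Committing .tmp/<family>/<file> as <family>/<file>" entries above reflect the final step of a flush: output is first written under the region's .tmp directory and then renamed into the column family directory, so readers only ever see complete files. A minimal sketch of that commit step follows, using the Hadoop FileSystem API; it is illustrative and not the HRegionFileSystem implementation.

```java
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

public class FlushCommitSketch {
  /**
   * Illustrative version of the commit step logged by regionserver.HRegionFileSystem(442):
   * the flushed file is renamed from the region's .tmp area into the family directory.
   */
  static Path commitFlushedFile(FileSystem fs, Path regionDir, String family, Path tmpFile)
      throws IOException {
    Path familyDir = new Path(regionDir, family);
    if (!fs.exists(familyDir)) {
      fs.mkdirs(familyDir);                       // family directory may not exist yet
    }
    Path committed = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, committed)) {         // rename is the atomic "move" on HDFS
      throw new IOException("Could not commit " + tmpFile + " as " + committed);
    }
    return committed;
  }
}
```
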
2024-11-19T08:49:53,113 DEBUG [M:0;3ab37fa97a98:38369 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006192955Disabling compacts and flushes for region at 1732006192955Disabling writes for close at 1732006192955Obtaining lock to block concurrent updates at 1732006192955Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732006192955Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732006192955Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732006192956 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732006192956Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732006192976 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732006192976Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732006192987 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732006193002 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732006193002Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732006193015 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732006193048 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732006193048Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732006193064 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732006193080 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732006193080Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a4b2633: reopening flushed file at 1732006193090 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67098f71: reopening flushed file at 1732006193095 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67617567: reopening flushed file at 1732006193101 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ca7db4f: reopening flushed file at 1732006193106 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=125, compaction requested=false at 1732006193112 (+6 ms)Writing region close event to WAL at 1732006193113 (+1 ms)Closed at 1732006193113 2024-11-19T08:49:53,113 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:53,113 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:53,113 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:53,113 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:53,113 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:53,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741830_1006 (size=61320) 2024-11-19T08:49:53,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34735 is added to blk_1073741830_1006 (size=61320) 2024-11-19T08:49:53,116 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T08:49:53,116 INFO [M:0;3ab37fa97a98:38369 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T08:49:53,116 INFO [M:0;3ab37fa97a98:38369 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38369 2024-11-19T08:49:53,116 INFO [M:0;3ab37fa97a98:38369 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:49:53,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:49:53,221 INFO [M:0;3ab37fa97a98:38369 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T08:49:53,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38369-0x1015391adc30000, quorum=127.0.0.1:61360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T08:49:53,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79aff83f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:49:53,271 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@295546a4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:49:53,271 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:49:53,271 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1be3ce1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:49:53,271 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d3adf0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir/,STOPPED} 2024-11-19T08:49:53,272 WARN [BP-115775057-172.17.0.2-1732006122738 heartbeating to localhost/127.0.0.1:33937 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:49:53,272 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T08:49:53,272 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:49:53,272 WARN [BP-115775057-172.17.0.2-1732006122738 heartbeating to localhost/127.0.0.1:33937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-115775057-172.17.0.2-1732006122738 (Datanode Uuid e2be23fc-6a78-4958-b22e-8ceca0ac6a28) service to localhost/127.0.0.1:33937 2024-11-19T08:49:53,273 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data3/current/BP-115775057-172.17.0.2-1732006122738 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:49:53,273 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data4/current/BP-115775057-172.17.0.2-1732006122738 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:49:53,273 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:49:53,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:53,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:53,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d0d4aa4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:49:53,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cec8e5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:49:53,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:49:53,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@549b308b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:49:53,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e1d4a93{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir/,STOPPED} 2024-11-19T08:49:53,285 WARN [BP-115775057-172.17.0.2-1732006122738 heartbeating to localhost/127.0.0.1:33937 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T08:49:53,285 WARN [BP-115775057-172.17.0.2-1732006122738 heartbeating to localhost/127.0.0.1:33937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-115775057-172.17.0.2-1732006122738 (Datanode Uuid 5e386ada-2388-44c6-99ef-1a5a4fd6b3c4) service to localhost/127.0.0.1:33937 2024-11-19T08:49:53,286 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
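
The two WARN stack traces above report the real failure (java.io.IOException: Filesystem closed) wrapped in an InvocationTargetException, because RecoverLeaseFSUtils calls isFileClosed reflectively and Method.invoke wraps whatever the target throws. The sketch below reproduces that wrapping with a hypothetical stand-in class; it is not the HBase or HDFS code.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveInvokeSketch {
  // Stand-in for a filesystem whose client has already been closed.
  static class FakeFs {
    private final boolean closed;
    FakeFs(boolean closed) { this.closed = closed; }

    public boolean isFileClosed(String path) throws IOException {
      if (closed) {
        throw new IOException("Filesystem closed");   // same cause seen in the log
      }
      return true;
    }
  }

  public static void main(String[] args) throws Exception {
    FakeFs fs = new FakeFs(true);
    Method m = FakeFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(fs, "/some/wal");
    } catch (InvocationTargetException e) {
      // Method.invoke wraps the target's IOException, which is why the log shows
      // "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed".
      System.out.println("wrapped cause: " + e.getCause());
    }
  }
}
```
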
2024-11-19T08:49:53,286 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T08:49:53,286 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data2/current/BP-115775057-172.17.0.2-1732006122738 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:49:53,287 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/cluster_0914bb55-7897-0867-53f1-aa9cb98b056b/data/data1/current/BP-115775057-172.17.0.2-1732006122738 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T08:49:53,287 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T08:49:53,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d6c94d4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:49:53,293 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4dcfcc47{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T08:49:53,293 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T08:49:53,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4419cf95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T08:49:53,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53e82728{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir/,STOPPED} 2024-11-19T08:49:53,299 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T08:49:53,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T08:49:53,347 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=226 (was 206) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33937 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33937 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33937 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:33937 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33937 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33937 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33937 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33937 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 485) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=182 (was 219), ProcessCount=11 (was 11), AvailableMemoryMB=4845 (was 5027) 2024-11-19T08:49:53,356 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=226, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=182, ProcessCount=11, AvailableMemoryMB=4845 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.log.dir so I do NOT create it in target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2393a27d-7090-65e1-b159-ed5a9f6c28ae/hadoop.tmp.dir so I do NOT create it in target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270, deleteOnExit=true 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/test.cache.data in system properties and HBase conf 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T08:49:53,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.log.dir in system properties and HBase conf 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T08:49:53,358 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:49:53,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/nfs.dump.dir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/java.io.tmpdir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T08:49:53,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T08:49:53,372 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:49:53,619 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:49:53,622 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:49:53,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:49:53,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:49:53,623 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:49:53,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:49:53,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f1f87c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:49:53,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32ef523b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:49:53,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cf43c00{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/java.io.tmpdir/jetty-localhost-37385-hadoop-hdfs-3_4_1-tests_jar-_-any-17512265630945759489/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T08:49:53,715 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@951dead{HTTP/1.1, (http/1.1)}{localhost:37385} 2024-11-19T08:49:53,715 INFO [Time-limited test {}] server.Server(415): Started @318243ms 2024-11-19T08:49:53,725 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T08:49:53,943 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:49:53,947 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:49:53,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:49:53,951 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:49:53,951 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:49:53,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25e8389f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:49:53,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@571f6f1a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:49:54,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6447bc52{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/java.io.tmpdir/jetty-localhost-39791-hadoop-hdfs-3_4_1-tests_jar-_-any-18318103561066940761/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:49:54,070 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@44454b52{HTTP/1.1, (http/1.1)}{localhost:39791} 2024-11-19T08:49:54,070 INFO [Time-limited test {}] server.Server(415): Started @318598ms 2024-11-19T08:49:54,071 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:49:54,096 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T08:49:54,098 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T08:49:54,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T08:49:54,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T08:49:54,099 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T08:49:54,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68dc7da0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.log.dir/,AVAILABLE} 2024-11-19T08:49:54,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20eba1fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T08:49:54,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30a5c3a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/java.io.tmpdir/jetty-localhost-33525-hadoop-hdfs-3_4_1-tests_jar-_-any-14394142273512797668/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T08:49:54,205 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e7f38a5{HTTP/1.1, (http/1.1)}{localhost:33525} 2024-11-19T08:49:54,205 INFO [Time-limited test {}] server.Server(415): Started @318733ms 2024-11-19T08:49:54,206 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T08:49:54,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T08:49:54,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:54,769 WARN [Thread-2488 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data2/current/BP-167317329-172.17.0.2-1732006193375/current, will proceed with Du for space computation calculation, 2024-11-19T08:49:54,769 WARN [Thread-2487 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data1/current/BP-167317329-172.17.0.2-1732006193375/current, will proceed with Du for space computation calculation, 2024-11-19T08:49:54,789 WARN [Thread-2451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:49:54,791 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x39d1d307e000e41b with lease ID 0x35972c03e54597c4: Processing first storage report for DS-db439388-acd8-4749-9031-08fd633655b5 from datanode DatanodeRegistration(127.0.0.1:34721, datanodeUuid=28680a1b-d028-416a-866a-d395537651a2, infoPort=45687, infoSecurePort=0, ipcPort=40529, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375) 2024-11-19T08:49:54,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39d1d307e000e41b with lease ID 0x35972c03e54597c4: from storage DS-db439388-acd8-4749-9031-08fd633655b5 node DatanodeRegistration(127.0.0.1:34721, datanodeUuid=28680a1b-d028-416a-866a-d395537651a2, infoPort=45687, infoSecurePort=0, ipcPort=40529, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T08:49:54,792 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x39d1d307e000e41b with lease ID 0x35972c03e54597c4: Processing first storage report for DS-2b84ef81-82fd-4ba7-80f8-38eba784cb62 from datanode DatanodeRegistration(127.0.0.1:34721, datanodeUuid=28680a1b-d028-416a-866a-d395537651a2, infoPort=45687, infoSecurePort=0, ipcPort=40529, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375) 2024-11-19T08:49:54,792 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39d1d307e000e41b with lease ID 0x35972c03e54597c4: from storage DS-2b84ef81-82fd-4ba7-80f8-38eba784cb62 node DatanodeRegistration(127.0.0.1:34721, datanodeUuid=28680a1b-d028-416a-866a-d395537651a2, infoPort=45687, infoSecurePort=0, ipcPort=40529, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:49:54,903 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data3/current/BP-167317329-172.17.0.2-1732006193375/current, will proceed with Du for space computation calculation, 2024-11-19T08:49:54,903 WARN [Thread-2499 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data4/current/BP-167317329-172.17.0.2-1732006193375/current, will proceed with Du for space computation calculation, 2024-11-19T08:49:54,924 WARN [Thread-2474 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T08:49:54,926 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x417ba171ab77ff7a with lease ID 0x35972c03e54597c5: Processing first storage report for DS-59888521-264a-4c08-bccc-07850a63ab3a from datanode DatanodeRegistration(127.0.0.1:44929, datanodeUuid=f056802d-aa79-45f0-addd-e8c1ddf0023f, infoPort=35743, infoSecurePort=0, ipcPort=42329, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375) 2024-11-19T08:49:54,926 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x417ba171ab77ff7a with lease ID 0x35972c03e54597c5: from storage DS-59888521-264a-4c08-bccc-07850a63ab3a node DatanodeRegistration(127.0.0.1:44929, datanodeUuid=f056802d-aa79-45f0-addd-e8c1ddf0023f, infoPort=35743, infoSecurePort=0, ipcPort=42329, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:49:54,926 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x417ba171ab77ff7a with lease ID 0x35972c03e54597c5: Processing first storage report for DS-0c74fa26-2a34-459c-bf07-2e7a273d784a from datanode DatanodeRegistration(127.0.0.1:44929, datanodeUuid=f056802d-aa79-45f0-addd-e8c1ddf0023f, infoPort=35743, infoSecurePort=0, ipcPort=42329, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375) 2024-11-19T08:49:54,926 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x417ba171ab77ff7a with lease ID 0x35972c03e54597c5: from storage DS-0c74fa26-2a34-459c-bf07-2e7a273d784a node DatanodeRegistration(127.0.0.1:44929, datanodeUuid=f056802d-aa79-45f0-addd-e8c1ddf0023f, infoPort=35743, infoSecurePort=0, ipcPort=42329, storageInfo=lv=-57;cid=testClusterID;nsid=1299964231;c=1732006193375), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T08:49:54,933 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c 2024-11-19T08:49:54,936 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/zookeeper_0, clientPort=62638, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T08:49:54,937 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62638 2024-11-19T08:49:54,937 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:54,938 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:54,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:49:54,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741825_1001 (size=7) 2024-11-19T08:49:54,949 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414 with version=8 2024-11-19T08:49:54,949 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34933/user/jenkins/test-data/51ad36c2-d4c5-5351-9de1-a1b639275dad/hbase-staging 2024-11-19T08:49:54,951 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:49:54,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:49:54,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:49:54,951 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:49:54,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:49:54,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:49:54,951 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T08:49:54,951 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:49:54,952 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39571 2024-11-19T08:49:54,953 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39571 connecting to ZooKeeper ensemble=127.0.0.1:62638 2024-11-19T08:49:55,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395710x0, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-19T08:49:55,002 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39571-0x1015392c1bc0000 connected 2024-11-19T08:49:55,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:55,072 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:55,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:49:55,075 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414, hbase.cluster.distributed=false 2024-11-19T08:49:55,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:49:55,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39571 2024-11-19T08:49:55,080 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39571 2024-11-19T08:49:55,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39571 2024-11-19T08:49:55,083 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39571 2024-11-19T08:49:55,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39571 2024-11-19T08:49:55,106 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3ab37fa97a98:0 server-side Connection retries=45 2024-11-19T08:49:55,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:49:55,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T08:49:55,106 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T08:49:55,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T08:49:55,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T08:49:55,106 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T08:49:55,107 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T08:49:55,107 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46457 2024-11-19T08:49:55,108 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46457 connecting to ZooKeeper ensemble=127.0.0.1:62638 2024-11-19T08:49:55,109 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:55,110 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:55,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464570x0, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T08:49:55,120 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46457-0x1015392c1bc0001 connected 2024-11-19T08:49:55,120 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:49:55,120 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T08:49:55,121 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T08:49:55,122 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T08:49:55,123 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T08:49:55,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46457 2024-11-19T08:49:55,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46457 2024-11-19T08:49:55,139 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46457 2024-11-19T08:49:55,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46457 2024-11-19T08:49:55,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46457 2024-11-19T08:49:55,155 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3ab37fa97a98:39571 2024-11-19T08:49:55,155 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:55,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:49:55,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:49:55,161 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:55,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T08:49:55,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,170 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T08:49:55,170 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3ab37fa97a98,39571,1732006194951 from backup master directory 2024-11-19T08:49:55,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:55,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:49:55,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T08:49:55,178 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T08:49:55,178 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:55,182 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/hbase.id] with ID: 6e26d3eb-71fd-4ecd-acdb-14a3cdd9390a 2024-11-19T08:49:55,182 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/.tmp/hbase.id 2024-11-19T08:49:55,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:49:55,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741826_1002 (size=42) 2024-11-19T08:49:55,192 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/.tmp/hbase.id]:[hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/hbase.id] 2024-11-19T08:49:55,202 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:55,202 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T08:49:55,204 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-19T08:49:55,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:49:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741827_1003 (size=196) 2024-11-19T08:49:55,218 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T08:49:55,218 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T08:49:55,218 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:49:55,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:49:55,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741828_1004 (size=1189) 2024-11-19T08:49:55,230 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store 2024-11-19T08:49:55,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:49:55,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741829_1005 (size=34) 2024-11-19T08:49:55,237 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:49:55,237 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T08:49:55,237 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:49:55,237 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:49:55,237 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T08:49:55,237 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T08:49:55,237 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T08:49:55,237 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006195237Disabling compacts and flushes for region at 1732006195237Disabling writes for close at 1732006195237Writing region close event to WAL at 1732006195237Closed at 1732006195237 2024-11-19T08:49:55,238 WARN [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/.initializing 2024-11-19T08:49:55,238 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/WALs/3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:55,240 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C39571%2C1732006194951, suffix=, logDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/WALs/3ab37fa97a98,39571,1732006194951, archiveDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/oldWALs, maxLogs=10 2024-11-19T08:49:55,241 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C39571%2C1732006194951.1732006195240 2024-11-19T08:49:55,245 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/WALs/3ab37fa97a98,39571,1732006194951/3ab37fa97a98%2C39571%2C1732006194951.1732006195240 2024-11-19T08:49:55,246 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35743:35743),(127.0.0.1/127.0.0.1:45687:45687)] 2024-11-19T08:49:55,246 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:49:55,246 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:49:55,246 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,246 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,251 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,252 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T08:49:55,252 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:55,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T08:49:55,254 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:49:55,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T08:49:55,256 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:49:55,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T08:49:55,258 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T08:49:55,259 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,259 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,260 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,261 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,261 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,261 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T08:49:55,262 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T08:49:55,264 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:49:55,264 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882840, jitterRate=0.12258964776992798}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T08:49:55,265 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732006195247Initializing all the Stores at 1732006195247Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006195247Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006195250 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006195250Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006195250Cleaning up temporary data from old regions at 1732006195261 (+11 ms)Region opened successfully at 1732006195265 (+4 ms) 2024-11-19T08:49:55,265 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T08:49:55,267 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3814a71e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:49:55,268 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T08:49:55,268 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T08:49:55,268 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T08:49:55,269 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T08:49:55,269 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T08:49:55,269 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T08:49:55,269 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T08:49:55,271 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T08:49:55,272 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-19T08:49:55,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T08:49:55,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T08:49:55,285 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T08:49:55,285 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T08:49:55,286 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T08:49:55,294 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T08:49:55,295 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T08:49:55,296 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T08:49:55,303 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T08:49:55,304 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T08:49:55,311 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T08:49:55,313 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T08:49:55,319 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T08:49:55,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:49:55,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T08:49:55,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,328 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3ab37fa97a98,39571,1732006194951, sessionid=0x1015392c1bc0000, setting cluster-up flag (Was=false) 2024-11-19T08:49:55,344 DEBUG
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,369 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T08:49:55,371 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:55,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,411 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T08:49:55,412 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:55,413 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T08:49:55,415 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T08:49:55,415 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T08:49:55,415 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-19T08:49:55,416 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3ab37fa97a98,39571,1732006194951 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T08:49:55,417 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:49:55,417 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:49:55,417 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:49:55,417 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=5, maxPoolSize=5 2024-11-19T08:49:55,417 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3ab37fa97a98:0, corePoolSize=10, maxPoolSize=10 2024-11-19T08:49:55,418 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,418 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:49:55,418 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,423 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:49:55,423 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T08:49:55,423 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,424 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T08:49:55,427 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732006225427 2024-11-19T08:49:55,427 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T08:49:55,427 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T08:49:55,427 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T08:49:55,427 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T08:49:55,427 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T08:49:55,427 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T08:49:55,428 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-19T08:49:55,428 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T08:49:55,428 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T08:49:55,428 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T08:49:55,430 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T08:49:55,431 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T08:49:55,434 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006195431,5,FailOnTimeoutGroup] 2024-11-19T08:49:55,436 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006195435,5,FailOnTimeoutGroup] 2024-11-19T08:49:55,436 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,436 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T08:49:55,436 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,436 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-19T08:49:55,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:49:55,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741831_1007 (size=1321) 2024-11-19T08:49:55,443 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T08:49:55,443 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414 2024-11-19T08:49:55,448 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(746): ClusterId : 6e26d3eb-71fd-4ecd-acdb-14a3cdd9390a 2024-11-19T08:49:55,448 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T08:49:55,453 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T08:49:55,453 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T08:49:55,462 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T08:49:55,462 DEBUG [RS:0;3ab37fa97a98:46457 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17c970b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3ab37fa97a98/172.17.0.2:0 2024-11-19T08:49:55,462 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:49:55,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741832_1008 (size=32) 2024-11-19T08:49:55,464 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:49:55,473 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:49:55,474 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:49:55,474 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:55,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:49:55,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:49:55,476 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:55,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:49:55,479 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:49:55,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:55,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:49:55,482 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:49:55,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:55,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:55,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:49:55,483 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740 2024-11-19T08:49:55,483 
DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3ab37fa97a98:46457 2024-11-19T08:49:55,483 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740 2024-11-19T08:49:55,483 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T08:49:55,483 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T08:49:55,483 DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T08:49:55,484 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(2659): reportForDuty to master=3ab37fa97a98,39571,1732006194951 with port=46457, startcode=1732006195106 2024-11-19T08:49:55,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:49:55,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:49:55,484 DEBUG [RS:0;3ab37fa97a98:46457 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T08:49:55,485 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:49:55,487 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:49:55,487 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48605, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T08:49:55,488 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39571 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:55,488 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39571 {}] master.ServerManager(517): Registering regionserver=3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:55,489 DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414 2024-11-19T08:49:55,489 DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35205 2024-11-19T08:49:55,489 DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T08:49:55,491 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T08:49:55,491 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721101, jitterRate=-0.08307352662086487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:49:55,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 
1732006195464Initializing all the Stores at 1732006195465 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006195465Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006195473 (+8 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006195473Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006195473Cleaning up temporary data from old regions at 1732006195484 (+11 ms)Region opened successfully at 1732006195492 (+8 ms) 2024-11-19T08:49:55,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:49:55,492 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:49:55,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:49:55,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:49:55,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:49:55,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:49:55,495 DEBUG [RS:0;3ab37fa97a98:46457 {}] zookeeper.ZKUtil(111): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:55,495 WARN [RS:0;3ab37fa97a98:46457 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T08:49:55,495 INFO [RS:0;3ab37fa97a98:46457 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:49:55,495 DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:55,496 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:49:55,496 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006195492Disabling compacts and flushes for region at 1732006195492Disabling writes for close at 1732006195492Writing region close event to WAL at 1732006195496 (+4 ms)Closed at 1732006195496 2024-11-19T08:49:55,497 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3ab37fa97a98,46457,1732006195106] 2024-11-19T08:49:55,497 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:49:55,497 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T08:49:55,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T08:49:55,498 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:49:55,499 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T08:49:55,505 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T08:49:55,506 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T08:49:55,506 INFO [RS:0;3ab37fa97a98:46457 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T08:49:55,506 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,510 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T08:49:55,511 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T08:49:55,511 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3ab37fa97a98:0, corePoolSize=2, maxPoolSize=2 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,511 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,512 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3ab37fa97a98:0, corePoolSize=1, maxPoolSize=1 2024-11-19T08:49:55,512 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:49:55,512 DEBUG [RS:0;3ab37fa97a98:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3ab37fa97a98:0, corePoolSize=3, maxPoolSize=3 2024-11-19T08:49:55,515 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,515 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,515 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,515 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-19T08:49:55,515 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,516 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,46457,1732006195106-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:49:55,533 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T08:49:55,533 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,46457,1732006195106-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,534 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,534 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.Replication(171): 3ab37fa97a98,46457,1732006195106 started 2024-11-19T08:49:55,548 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:55,548 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1482): Serving as 3ab37fa97a98,46457,1732006195106, RpcServer on 3ab37fa97a98/172.17.0.2:46457, sessionid=0x1015392c1bc0001 2024-11-19T08:49:55,548 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T08:49:55,548 DEBUG [RS:0;3ab37fa97a98:46457 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:55,548 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,46457,1732006195106' 2024-11-19T08:49:55,548 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3ab37fa97a98,46457,1732006195106' 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T08:49:55,549 DEBUG [RS:0;3ab37fa97a98:46457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T08:49:55,550 INFO [RS:0;3ab37fa97a98:46457 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T08:49:55,550 INFO [RS:0;3ab37fa97a98:46457 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-19T08:49:55,649 WARN [3ab37fa97a98:39571 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T08:49:55,652 INFO [RS:0;3ab37fa97a98:46457 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C46457%2C1732006195106, suffix=, logDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/3ab37fa97a98,46457,1732006195106, archiveDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/oldWALs, maxLogs=32 2024-11-19T08:49:55,652 INFO [RS:0;3ab37fa97a98:46457 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46457%2C1732006195106.1732006195652 2024-11-19T08:49:55,660 INFO [RS:0;3ab37fa97a98:46457 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/3ab37fa97a98,46457,1732006195106/3ab37fa97a98%2C46457%2C1732006195106.1732006195652 2024-11-19T08:49:55,673 DEBUG [RS:0;3ab37fa97a98:46457 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45687:45687),(127.0.0.1/127.0.0.1:35743:35743)] 2024-11-19T08:49:55,899 DEBUG [3ab37fa97a98:39571 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T08:49:55,900 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:55,901 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,46457,1732006195106, state=OPENING 2024-11-19T08:49:55,952 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T08:49:55,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:55,961 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T08:49:55,961 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:49:55,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,46457,1732006195106}] 2024-11-19T08:49:55,962 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:49:56,117 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T08:49:56,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38399, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T08:49:56,123 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T08:49:56,123 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:49:56,125 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3ab37fa97a98%2C46457%2C1732006195106.meta, suffix=.meta, logDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/3ab37fa97a98,46457,1732006195106, archiveDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/oldWALs, maxLogs=32 2024-11-19T08:49:56,126 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3ab37fa97a98%2C46457%2C1732006195106.meta.1732006196126.meta 2024-11-19T08:49:56,136 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/3ab37fa97a98,46457,1732006195106/3ab37fa97a98%2C46457%2C1732006195106.meta.1732006196126.meta 2024-11-19T08:49:56,144 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35743:35743),(127.0.0.1/127.0.0.1:45687:45687)] 2024-11-19T08:49:56,151 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T08:49:56,151 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T08:49:56,151 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T08:49:56,151 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T08:49:56,152 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T08:49:56,152 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T08:49:56,152 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T08:49:56,152 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T08:49:56,155 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T08:49:56,156 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T08:49:56,156 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:56,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:56,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T08:49:56,157 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T08:49:56,157 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:56,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:56,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T08:49:56,159 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T08:49:56,159 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:56,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T08:49:56,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T08:49:56,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T08:49:56,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T08:49:56,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T08:49:56,162 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T08:49:56,162 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740 2024-11-19T08:49:56,164 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740 2024-11-19T08:49:56,165 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T08:49:56,165 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T08:49:56,165 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T08:49:56,167 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T08:49:56,168 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727156, jitterRate=-0.07537393271923065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T08:49:56,168 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T08:49:56,168 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732006196152Writing region info on filesystem at 1732006196152Initializing all the Stores at 1732006196153 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006196153Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006196154 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732006196154Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732006196154Cleaning up temporary data from old regions at 1732006196165 (+11 ms)Running coprocessor post-open hooks at 1732006196168 (+3 ms)Region opened successfully at 1732006196168 2024-11-19T08:49:56,170 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732006196117 2024-11-19T08:49:56,172 DEBUG [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T08:49:56,172 INFO [RS_OPEN_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T08:49:56,173 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:56,174 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3ab37fa97a98,46457,1732006195106, state=OPEN 2024-11-19T08:49:56,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:49:56,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T08:49:56,211 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:56,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:49:56,211 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T08:49:56,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T08:49:56,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3ab37fa97a98,46457,1732006195106 in 250 msec 2024-11-19T08:49:56,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T08:49:56,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 716 msec 2024-11-19T08:49:56,216 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T08:49:56,216 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T08:49:56,217 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:49:56,217 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,46457,1732006195106, seqNum=-1] 2024-11-19T08:49:56,217 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:49:56,218 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36989, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:49:56,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 808 msec 2024-11-19T08:49:56,223 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732006196223, completionTime=-1 2024-11-19T08:49:56,223 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T08:49:56,223 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T08:49:56,225 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T08:49:56,225 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732006256225 2024-11-19T08:49:56,225 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732006316225 2024-11-19T08:49:56,225 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-19T08:49:56,226 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39571,1732006194951-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:56,226 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39571,1732006194951-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:56,226 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39571,1732006194951-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:56,226 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3ab37fa97a98:39571, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:49:56,226 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:56,226 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T08:49:56,227 DEBUG [master/3ab37fa97a98:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T08:49:56,229 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.051sec 2024-11-19T08:49:56,229 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T08:49:56,229 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T08:49:56,230 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T08:49:56,230 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T08:49:56,230 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T08:49:56,230 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39571,1732006194951-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T08:49:56,230 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39571,1732006194951-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T08:49:56,232 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T08:49:56,232 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T08:49:56,232 INFO [master/3ab37fa97a98:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3ab37fa97a98,39571,1732006194951-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T08:49:56,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4341028d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:49:56,248 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3ab37fa97a98,39571,-1 for getting cluster id 2024-11-19T08:49:56,248 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T08:49:56,249 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6e26d3eb-71fd-4ecd-acdb-14a3cdd9390a' 2024-11-19T08:49:56,250 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T08:49:56,250 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6e26d3eb-71fd-4ecd-acdb-14a3cdd9390a" 2024-11-19T08:49:56,250 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73fc411, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:49:56,250 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3ab37fa97a98,39571,-1] 2024-11-19T08:49:56,250 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T08:49:56,250 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:56,251 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43356, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T08:49:56,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b0925e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T08:49:56,252 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T08:49:56,253 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3ab37fa97a98,46457,1732006195106, seqNum=-1] 2024-11-19T08:49:56,253 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T08:49:56,254 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43516, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T08:49:56,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:56,256 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T08:49:56,258 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T08:49:56,258 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T08:49:56,260 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/oldWALs, maxLogs=32 2024-11-19T08:49:56,260 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732006196260 2024-11-19T08:49:56,265 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/test.com,8080,1/test.com%2C8080%2C1.1732006196260 2024-11-19T08:49:56,272 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45687:45687),(127.0.0.1/127.0.0.1:35743:35743)] 2024-11-19T08:49:56,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,36037,1732005987998/3ab37fa97a98%2C36037%2C1732005987998.1732005988237 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:56,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36955/user/jenkins/test-data/89d71961-9d10-4dc8-093d-69c60ec5d882/WALs/3ab37fa97a98,46031,1732005986641/3ab37fa97a98%2C46031%2C1732005986641.meta.1732005987841.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T08:49:56,277 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732006196277 2024-11-19T08:49:56,296 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,296 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,296 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,296 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,296 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,296 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/test.com,8080,1/test.com%2C8080%2C1.1732006196260 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/test.com,8080,1/test.com%2C8080%2C1.1732006196277 2024-11-19T08:49:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741835_1011 (size=93) 2024-11-19T08:49:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741835_1011 (size=93) 2024-11-19T08:49:56,307 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35743:35743),(127.0.0.1/127.0.0.1:45687:45687)] 2024-11-19T08:49:56,307 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/WALs/test.com,8080,1/test.com%2C8080%2C1.1732006196260 to hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/oldWALs/test.com%2C8080%2C1.1732006196260 2024-11-19T08:49:56,307 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,307 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,307 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,308 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,308 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741836_1012 (size=93) 2024-11-19T08:49:56,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741836_1012 (size=93) 2024-11-19T08:49:56,317 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/oldWALs 2024-11-19T08:49:56,317 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732006196277) 2024-11-19T08:49:56,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T08:49:56,317 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T08:49:56,317 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:49:56,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:56,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:56,317 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T08:49:56,317 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T08:49:56,318 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1228395001, stopped=false 2024-11-19T08:49:56,318 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3ab37fa97a98,39571,1732006194951 2024-11-19T08:49:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:49:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T08:49:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T08:49:56,336 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T08:49:56,336 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T08:49:56,336 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:49:56,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:56,336 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3ab37fa97a98,46457,1732006195106' ***** 2024-11-19T08:49:56,336 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T08:49:56,337 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(959): stopping server 3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3ab37fa97a98:46457. 
2024-11-19T08:49:56,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:49:56,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T08:49:56,337 DEBUG [RS:0;3ab37fa97a98:46457 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T08:49:56,337 DEBUG [RS:0;3ab37fa97a98:46457 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T08:49:56,337 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T08:49:56,337 DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T08:49:56,338 DEBUG [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T08:49:56,338 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T08:49:56,338 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T08:49:56,338 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T08:49:56,338 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T08:49:56,338 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T08:49:56,338 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T08:49:56,359 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740/.tmp/ns/15265421bdbf4fbc80ba64a86650f139 is 43, key is default/ns:d/1732006196219/Put/seqid=0 2024-11-19T08:49:56,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741837_1013 (size=5153) 2024-11-19T08:49:56,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741837_1013 (size=5153) 2024-11-19T08:49:56,366 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740/.tmp/ns/15265421bdbf4fbc80ba64a86650f139 2024-11-19T08:49:56,371 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740/.tmp/ns/15265421bdbf4fbc80ba64a86650f139 as hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740/ns/15265421bdbf4fbc80ba64a86650f139 2024-11-19T08:49:56,375 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740/ns/15265421bdbf4fbc80ba64a86650f139, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T08:49:56,376 INFO 
[RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false 2024-11-19T08:49:56,380 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T08:49:56,380 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T08:49:56,381 INFO [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T08:49:56,381 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732006196338Running coprocessor pre-close hooks at 1732006196338Disabling compacts and flushes for region at 1732006196338Disabling writes for close at 1732006196338Obtaining lock to block concurrent updates at 1732006196338Preparing flush snapshotting stores in 1588230740 at 1732006196338Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732006196338Flushing stores of hbase:meta,,1.1588230740 at 1732006196338Flushing 1588230740/ns: creating writer at 1732006196339 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732006196358 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732006196359 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38c9d0ad: reopening flushed file at 1732006196370 (+11 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false at 1732006196376 (+6 ms)Writing region close event to WAL at 1732006196377 (+1 ms)Running coprocessor post-close hooks at 1732006196380 (+3 ms)Closed at 1732006196381 (+1 ms) 2024-11-19T08:49:56,381 DEBUG [RS_CLOSE_META-regionserver/3ab37fa97a98:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T08:49:56,516 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T08:49:56,516 INFO [regionserver/3ab37fa97a98:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T08:49:56,538 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(976): stopping server 3ab37fa97a98,46457,1732006195106; all regions closed. 
2024-11-19T08:49:56,538 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,538 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,539 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,539 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741834_1010 (size=1152) 2024-11-19T08:49:56,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741834_1010 (size=1152) 2024-11-19T08:49:56,543 DEBUG [RS:0;3ab37fa97a98:46457 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/oldWALs 2024-11-19T08:49:56,543 INFO [RS:0;3ab37fa97a98:46457 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C46457%2C1732006195106.meta:.meta(num 1732006196126) 2024-11-19T08:49:56,544 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,544 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,544 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,544 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,544 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T08:49:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741833_1009 (size=93) 2024-11-19T08:49:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741833_1009 (size=93) 2024-11-19T08:49:56,548 DEBUG [RS:0;3ab37fa97a98:46457 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/oldWALs 2024-11-19T08:49:56,548 INFO [RS:0;3ab37fa97a98:46457 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3ab37fa97a98%2C46457%2C1732006195106:(num 1732006195652) 2024-11-19T08:49:56,548 DEBUG [RS:0;3ab37fa97a98:46457 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T08:49:56,548 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T08:49:56,548 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:49:56,548 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.ChoreService(370): Chore service for: regionserver/3ab37fa97a98:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T08:49:56,548 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T08:49:56,548 INFO [regionserver/3ab37fa97a98:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T08:49:56,548 INFO [RS:0;3ab37fa97a98:46457 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46457 2024-11-19T08:49:56,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T08:49:56,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3ab37fa97a98,46457,1732006195106 2024-11-19T08:49:56,577 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T08:49:56,586 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3ab37fa97a98,46457,1732006195106] 2024-11-19T08:49:56,594 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3ab37fa97a98,46457,1732006195106 already deleted, retry=false 2024-11-19T08:49:56,594 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3ab37fa97a98,46457,1732006195106 expired; onlineServers=0 2024-11-19T08:49:56,594 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3ab37fa97a98,39571,1732006194951' ***** 2024-11-19T08:49:56,594 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T08:49:56,594 INFO [M:0;3ab37fa97a98:39571 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T08:49:56,594 INFO [M:0;3ab37fa97a98:39571 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T08:49:56,594 DEBUG [M:0;3ab37fa97a98:39571 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T08:49:56,594 DEBUG [M:0;3ab37fa97a98:39571 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T08:49:56,594 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T08:49:56,594 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006195435 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.small.0-1732006195435,5,FailOnTimeoutGroup]
2024-11-19T08:49:56,594 DEBUG [master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006195431 {}] cleaner.HFileCleaner(306): Exit Thread[master/3ab37fa97a98:0:becomeActiveMaster-HFileCleaner.large.0-1732006195431,5,FailOnTimeoutGroup]
2024-11-19T08:49:56,595 INFO [M:0;3ab37fa97a98:39571 {}] hbase.ChoreService(370): Chore service for: master/3ab37fa97a98:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-19T08:49:56,595 INFO [M:0;3ab37fa97a98:39571 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-19T08:49:56,595 DEBUG [M:0;3ab37fa97a98:39571 {}] master.HMaster(1795): Stopping service threads
2024-11-19T08:49:56,595 INFO [M:0;3ab37fa97a98:39571 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-19T08:49:56,595 INFO [M:0;3ab37fa97a98:39571 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-19T08:49:56,595 INFO [M:0;3ab37fa97a98:39571 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-19T08:49:56,595 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-19T08:49:56,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-19T08:49:56,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-19T08:49:56,603 DEBUG [M:0;3ab37fa97a98:39571 {}] zookeeper.ZKUtil(347): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-19T08:49:56,603 WARN [M:0;3ab37fa97a98:39571 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-19T08:49:56,603 INFO [M:0;3ab37fa97a98:39571 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/.lastflushedseqids
2024-11-19T08:49:56,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741838_1014 (size=99)
2024-11-19T08:49:56,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741838_1014 (size=99)
2024-11-19T08:49:56,608 INFO [M:0;3ab37fa97a98:39571 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-19T08:49:56,608 INFO [M:0;3ab37fa97a98:39571 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-19T08:49:56,608 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-19T08:49:56,608 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-19T08:49:56,608 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-19T08:49:56,608 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-19T08:49:56,608 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-19T08:49:56,608 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-11-19T08:49:56,624 DEBUG [M:0;3ab37fa97a98:39571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17777ccfa4284eafa912ee25e5453d80 is 82, key is hbase:meta,,1/info:regioninfo/1732006196173/Put/seqid=0
2024-11-19T08:49:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741839_1015 (size=5672)
2024-11-19T08:49:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741839_1015 (size=5672)
2024-11-19T08:49:56,629 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17777ccfa4284eafa912ee25e5453d80
2024-11-19T08:49:56,646 DEBUG [M:0;3ab37fa97a98:39571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb62a679f81345b5b814fed95468beed is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732006196222/Put/seqid=0
2024-11-19T08:49:56,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741840_1016 (size=5275)
2024-11-19T08:49:56,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741840_1016 (size=5275)
2024-11-19T08:49:56,651 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb62a679f81345b5b814fed95468beed
2024-11-19T08:49:56,669 DEBUG [M:0;3ab37fa97a98:39571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/16e17abfca14422fa3a3a7a5eca7f8d2 is 69, key is 3ab37fa97a98,46457,1732006195106/rs:state/1732006195488/Put/seqid=0
2024-11-19T08:49:56,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741841_1017 (size=5156)
2024-11-19T08:49:56,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741841_1017 (size=5156)
2024-11-19T08:49:56,674 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/16e17abfca14422fa3a3a7a5eca7f8d2
2024-11-19T08:49:56,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-19T08:49:56,686 INFO [RS:0;3ab37fa97a98:46457 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-19T08:49:56,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x1015392c1bc0001, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-19T08:49:56,686 INFO [RS:0;3ab37fa97a98:46457 {}] regionserver.HRegionServer(1031): Exiting; stopping=3ab37fa97a98,46457,1732006195106; zookeeper connection closed.
2024-11-19T08:49:56,686 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3eb9e75c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3eb9e75c
2024-11-19T08:49:56,687 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-19T08:49:56,693 DEBUG [M:0;3ab37fa97a98:39571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a58fb6b642ec49a1b3fc9b92b8306441 is 52, key is load_balancer_on/state:d/1732006196257/Put/seqid=0
2024-11-19T08:49:56,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741842_1018 (size=5056)
2024-11-19T08:49:56,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741842_1018 (size=5056)
2024-11-19T08:49:56,698 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a58fb6b642ec49a1b3fc9b92b8306441
2024-11-19T08:49:56,703 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17777ccfa4284eafa912ee25e5453d80 as hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17777ccfa4284eafa912ee25e5453d80
2024-11-19T08:49:56,707 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17777ccfa4284eafa912ee25e5453d80, entries=8, sequenceid=29, filesize=5.5 K
2024-11-19T08:49:56,708 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb62a679f81345b5b814fed95468beed as hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb62a679f81345b5b814fed95468beed
2024-11-19T08:49:56,714 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb62a679f81345b5b814fed95468beed, entries=3, sequenceid=29, filesize=5.2 K
2024-11-19T08:49:56,715 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/16e17abfca14422fa3a3a7a5eca7f8d2 as hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/16e17abfca14422fa3a3a7a5eca7f8d2
2024-11-19T08:49:56,720 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/16e17abfca14422fa3a3a7a5eca7f8d2, entries=1, sequenceid=29, filesize=5.0 K
2024-11-19T08:49:56,721 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a58fb6b642ec49a1b3fc9b92b8306441 as hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a58fb6b642ec49a1b3fc9b92b8306441
2024-11-19T08:49:56,726 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35205/user/jenkins/test-data/c33dba65-e4f6-4035-cc83-d63418fd6414/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a58fb6b642ec49a1b3fc9b92b8306441, entries=1, sequenceid=29, filesize=4.9 K
2024-11-19T08:49:56,727 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=29, compaction requested=false
2024-11-19T08:49:56,728 INFO [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-19T08:49:56,728 DEBUG [M:0;3ab37fa97a98:39571 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732006196608Disabling compacts and flushes for region at 1732006196608Disabling writes for close at 1732006196608Obtaining lock to block concurrent updates at 1732006196608Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732006196608Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732006196609 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732006196609Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732006196609Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732006196624 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732006196624Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732006196633 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732006196646 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732006196646Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732006196655 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732006196668 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732006196668Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732006196678 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732006196692 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732006196693 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6eb58548: reopening flushed file at 1732006196702 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b816fc4: reopening flushed file at 1732006196707 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bd9f4c9: reopening flushed file at 1732006196714 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dacad9c: reopening flushed file at 1732006196720 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=29, compaction requested=false at 1732006196727 (+7 ms)Writing region close event to WAL at 1732006196728 (+1 ms)Closed at 1732006196728
2024-11-19T08:49:56,729 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:49:56,729 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:49:56,729 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:49:56,729 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:49:56,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T08:49:56,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34721 is added to blk_1073741830_1006 (size=10311)
2024-11-19T08:49:56,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44929 is added to blk_1073741830_1006 (size=10311)
2024-11-19T08:49:56,734 INFO [M:0;3ab37fa97a98:39571 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-19T08:49:56,734 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-19T08:49:56,734 INFO [M:0;3ab37fa97a98:39571 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39571
2024-11-19T08:49:56,735 INFO [M:0;3ab37fa97a98:39571 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-19T08:49:56,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-19T08:49:56,861 INFO [M:0;3ab37fa97a98:39571 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-19T08:49:56,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39571-0x1015392c1bc0000, quorum=127.0.0.1:62638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-19T08:49:56,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30a5c3a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T08:49:56,865 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e7f38a5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-19T08:49:56,865 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-19T08:49:56,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20eba1fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-19T08:49:56,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68dc7da0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.log.dir/,STOPPED}
2024-11-19T08:49:56,866 WARN [BP-167317329-172.17.0.2-1732006193375 heartbeating to localhost/127.0.0.1:35205 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-19T08:49:56,866 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-19T08:49:56,866 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-19T08:49:56,866 WARN [BP-167317329-172.17.0.2-1732006193375 heartbeating to localhost/127.0.0.1:35205 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-167317329-172.17.0.2-1732006193375 (Datanode Uuid f056802d-aa79-45f0-addd-e8c1ddf0023f) service to localhost/127.0.0.1:35205
2024-11-19T08:49:56,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data3/current/BP-167317329-172.17.0.2-1732006193375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T08:49:56,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data4/current/BP-167317329-172.17.0.2-1732006193375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T08:49:56,867 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-19T08:49:56,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6447bc52{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T08:49:56,869 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@44454b52{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-19T08:49:56,869 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-19T08:49:56,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@571f6f1a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-19T08:49:56,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25e8389f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.log.dir/,STOPPED}
2024-11-19T08:49:56,870 WARN [BP-167317329-172.17.0.2-1732006193375 heartbeating to localhost/127.0.0.1:35205 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-19T08:49:56,870 WARN [BP-167317329-172.17.0.2-1732006193375 heartbeating to localhost/127.0.0.1:35205 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-167317329-172.17.0.2-1732006193375 (Datanode Uuid 28680a1b-d028-416a-866a-d395537651a2) service to localhost/127.0.0.1:35205
2024-11-19T08:49:56,870 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-19T08:49:56,870 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-19T08:49:56,871 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data1/current/BP-167317329-172.17.0.2-1732006193375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T08:49:56,871 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/cluster_545578ae-88c8-497e-8073-c171cce8f270/data/data2/current/BP-167317329-172.17.0.2-1732006193375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T08:49:56,871 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-19T08:49:56,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cf43c00{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-19T08:49:56,876 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@951dead{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-19T08:49:56,876 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-19T08:49:56,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32ef523b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-19T08:49:56,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f1f87c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/89ad4f2e-0fc4-fde9-ed9d-0496d73f571c/hadoop.log.dir/,STOPPED}
2024-11-19T08:49:56,881 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-19T08:49:56,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-19T08:49:56,910 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=266 (was 226)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35205
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35205
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:35205 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35205
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:35205 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:35205 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:35205
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35205
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=208 (was 182) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6031 (was 4845) - AvailableMemoryMB LEAK? -