2024-11-19 01:07:09,445 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 01:07:09,459 main DEBUG Took 0.012120 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-19 01:07:09,460 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-19 01:07:09,460 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-19 01:07:09,461 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-19 01:07:09,463 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,471 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-19 01:07:09,487 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,489 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,490 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,490 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,491 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,491 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,492 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,492 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,493 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,494 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,496 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-19 01:07:09,497 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,497 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,498 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,499 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,499 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,500 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,500 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,501 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,501 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 01:07:09,502 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,502 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-19 01:07:09,504 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 01:07:09,508 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-19 01:07:09,510 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-19 01:07:09,511 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-19 01:07:09,512 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-19 01:07:09,513 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-19 01:07:09,525 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-19 01:07:09,529 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-19 01:07:09,531 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-19 01:07:09,532 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-19 01:07:09,532 main DEBUG createAppenders(={Console}) 2024-11-19 01:07:09,533 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-19 01:07:09,534 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 01:07:09,534 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-19 01:07:09,535 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-19 01:07:09,535 main DEBUG OutputStream closed 2024-11-19 01:07:09,535 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-19 01:07:09,536 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-19 01:07:09,536 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-19 01:07:09,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-19 01:07:09,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-19 01:07:09,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-19 01:07:09,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-19 01:07:09,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-19 01:07:09,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-19 01:07:09,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-19 01:07:09,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-19 01:07:09,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-19 01:07:09,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-19 01:07:09,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-19 01:07:09,639 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-19 01:07:09,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-19 01:07:09,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-19 01:07:09,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-19 01:07:09,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-19 01:07:09,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-19 01:07:09,641 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-19 01:07:09,644 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-19 01:07:09,644 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-19 01:07:09,644 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-19 01:07:09,645 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-19T01:07:09,960 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5 2024-11-19 01:07:09,964 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-19 01:07:09,965 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
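The block above is Log4j2's status output while it loads a properties-based configuration (PropertiesConfiguration) from log4j2.properties inside the hbase-logging test jar. As a rough, hypothetical reconstruction only (the real file is not shown here, and the property keys for the HBase-specific HBaseTestAppender plugin are assumptions), a configuration along these lines would produce the Console appender, pattern, and logger levels reported above:

# hypothetical log4j2.properties sketch; keys for HBaseTestAppender are assumed
appender.console.type = HBaseTestAppender
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

rootLogger = INFO,Console

logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false

The remaining per-class loggers listed in the createLoggers(...) entry (MBeans, TestJul2Slf4j, MetricsSinkAdapter, MetricsSystemImpl, FailedServers, MetricsConfig, ScheduledChore, RSRpcServices, the netty channel package) would be declared the same way with the levels shown in the builder entries above.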
2024-11-19T01:07:09,977 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-19T01:07:10,017 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=194, ProcessCount=11, AvailableMemoryMB=5224 2024-11-19T01:07:10,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T01:07:10,034 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4, deleteOnExit=true 2024-11-19T01:07:10,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T01:07:10,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/test.cache.data in system properties and HBase conf 2024-11-19T01:07:10,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T01:07:10,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir in system properties and HBase conf 2024-11-19T01:07:10,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T01:07:10,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T01:07:10,038 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T01:07:10,123 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-19T01:07:10,216 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T01:07:10,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:07:10,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:07:10,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T01:07:10,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:07:10,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T01:07:10,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T01:07:10,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:07:10,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:07:10,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T01:07:10,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/nfs.dump.dir in system properties and HBase conf 2024-11-19T01:07:10,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/java.io.tmpdir in system properties and HBase conf 2024-11-19T01:07:10,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:07:10,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T01:07:10,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T01:07:10,738 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:07:11,104 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-19T01:07:11,202 INFO [Time-limited test {}] log.Log(170): Logging initialized @2497ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-19T01:07:11,301 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:07:11,387 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:07:11,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:07:11,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:07:11,413 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:07:11,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:07:11,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:07:11,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:07:11,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/java.io.tmpdir/jetty-localhost-46161-hadoop-hdfs-3_4_1-tests_jar-_-any-7106608980164117351/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:07:11,663 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:46161} 2024-11-19T01:07:11,663 INFO [Time-limited test {}] server.Server(415): Started @2959ms 2024-11-19T01:07:11,696 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:07:12,074 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:07:12,081 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:07:12,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:07:12,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:07:12,083 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:07:12,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:07:12,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:07:12,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/java.io.tmpdir/jetty-localhost-44091-hadoop-hdfs-3_4_1-tests_jar-_-any-248943021254519852/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:07:12,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:44091} 2024-11-19T01:07:12,210 INFO [Time-limited test {}] server.Server(415): Started @3506ms 2024-11-19T01:07:12,267 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:07:12,398 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:07:12,408 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:07:12,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:07:12,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:07:12,410 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:07:12,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:07:12,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:07:12,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/java.io.tmpdir/jetty-localhost-43839-hadoop-hdfs-3_4_1-tests_jar-_-any-2769934521793539309/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:07:12,583 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:43839} 2024-11-19T01:07:12,583 INFO [Time-limited test {}] server.Server(415): Started @3879ms 2024-11-19T01:07:12,585 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
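By this point the log shows the harness standing up a mini cluster for regionserver.wal.TestLogRolling#testSlowSyncLogRolling: one master, one region server, two HDFS datanodes and one ZooKeeper server, per the StartMiniClusterOption line, with all Hadoop/HBase directories redirected under the test-data path. A minimal Java sketch of that kind of setup, assuming the public HBaseTestingUtil/StartMiniClusterOption API rather than the actual TestLogRolling source, looks like this:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Shape matches the StartMiniClusterOption reported in the log above.
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts HDFS, ZooKeeper, master and region server
    try {
      // test logic would run here against util.getConnection()
    } finally {
      util.shutdownMiniCluster();    // stops all daemons and cleans up the test data dirs
    }
  }
}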
2024-11-19T01:07:12,757 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data1/current/BP-348894987-172.17.0.2-1731978430850/current, will proceed with Du for space computation calculation, 2024-11-19T01:07:12,757 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data2/current/BP-348894987-172.17.0.2-1731978430850/current, will proceed with Du for space computation calculation, 2024-11-19T01:07:12,757 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data3/current/BP-348894987-172.17.0.2-1731978430850/current, will proceed with Du for space computation calculation, 2024-11-19T01:07:12,757 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data4/current/BP-348894987-172.17.0.2-1731978430850/current, will proceed with Du for space computation calculation, 2024-11-19T01:07:12,816 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:07:12,817 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:07:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45aec5f875fc48af with lease ID 0xf6b7708ee836caf9: Processing first storage report for DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4 from datanode DatanodeRegistration(127.0.0.1:38171, datanodeUuid=c89a3ffa-f785-4086-94ac-6ec8e19eb86f, infoPort=41887, infoSecurePort=0, ipcPort=45999, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850) 2024-11-19T01:07:12,897 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45aec5f875fc48af with lease ID 0xf6b7708ee836caf9: from storage DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4 node DatanodeRegistration(127.0.0.1:38171, datanodeUuid=c89a3ffa-f785-4086-94ac-6ec8e19eb86f, infoPort=41887, infoSecurePort=0, ipcPort=45999, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-19T01:07:12,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf47d2411a2f4dae with lease ID 0xf6b7708ee836cafa: Processing first storage report for DS-d869a2f4-c604-4854-80c3-b575a230e78e from datanode DatanodeRegistration(127.0.0.1:45823, datanodeUuid=b8ca2166-9be6-456e-aa64-56cc6ec3bb13, infoPort=43475, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850) 2024-11-19T01:07:12,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf47d2411a2f4dae with lease ID 0xf6b7708ee836cafa: from storage DS-d869a2f4-c604-4854-80c3-b575a230e78e node DatanodeRegistration(127.0.0.1:45823, datanodeUuid=b8ca2166-9be6-456e-aa64-56cc6ec3bb13, infoPort=43475, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T01:07:12,899 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45aec5f875fc48af with lease ID 0xf6b7708ee836caf9: Processing first storage report for DS-67d5dde5-e70f-4179-a66f-e88d6707ef86 from datanode DatanodeRegistration(127.0.0.1:38171, datanodeUuid=c89a3ffa-f785-4086-94ac-6ec8e19eb86f, infoPort=41887, infoSecurePort=0, ipcPort=45999, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850) 2024-11-19T01:07:12,899 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45aec5f875fc48af with lease ID 0xf6b7708ee836caf9: from storage DS-67d5dde5-e70f-4179-a66f-e88d6707ef86 node DatanodeRegistration(127.0.0.1:38171, datanodeUuid=c89a3ffa-f785-4086-94ac-6ec8e19eb86f, infoPort=41887, infoSecurePort=0, ipcPort=45999, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T01:07:12,899 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf47d2411a2f4dae with lease ID 0xf6b7708ee836cafa: Processing first storage report for DS-951673b6-00d5-4d12-ac62-bcfbefe13b4d from datanode DatanodeRegistration(127.0.0.1:45823, datanodeUuid=b8ca2166-9be6-456e-aa64-56cc6ec3bb13, infoPort=43475, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850) 2024-11-19T01:07:12,899 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xcf47d2411a2f4dae with lease ID 0xf6b7708ee836cafa: from storage DS-951673b6-00d5-4d12-ac62-bcfbefe13b4d node DatanodeRegistration(127.0.0.1:45823, datanodeUuid=b8ca2166-9be6-456e-aa64-56cc6ec3bb13, infoPort=43475, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=1344744012;c=1731978430850), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:07:13,009 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5 2024-11-19T01:07:13,093 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/zookeeper_0, clientPort=59946, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T01:07:13,103 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59946 2024-11-19T01:07:13,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:13,116 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:13,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:07:13,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:07:13,766 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38 with version=8 2024-11-19T01:07:13,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase-staging 2024-11-19T01:07:13,858 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-19T01:07:14,108 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:07:14,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:07:14,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:07:14,125 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:07:14,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:07:14,125 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:07:14,262 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T01:07:14,322 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-19T01:07:14,331 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-19T01:07:14,335 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:07:14,361 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 24423 (auto-detected) 2024-11-19T01:07:14,363 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-19T01:07:14,382 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34043 2024-11-19T01:07:14,402 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34043 connecting to ZooKeeper ensemble=127.0.0.1:59946 2024-11-19T01:07:14,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340430x0, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:07:14,438 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34043-0x101088886dd0000 connected 2024-11-19T01:07:14,464 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:14,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:14,477 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:07:14,481 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38, hbase.cluster.distributed=false 2024-11-19T01:07:14,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:07:14,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34043 
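The RpcExecutor and RWQueueRpcExecutor entries above describe how the master's RPC server is sized in this run: three default handlers on one FIFO call queue, plus a priority executor split into one write and two read handlers, and no scan queues. Sizing of this sort is driven by configuration; as an illustration only (the log does not show which keys this test actually sets, so the mapping below is an assumption), the relevant knobs look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Total RPC handler threads; the executors above were built with handlerCount=3.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Fraction of the priority queues/handlers dedicated to reads vs. writes.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    // No dedicated scan queues, matching scanQueues=0 scanHandlers=0 above.
    conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0f);
  }
}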
2024-11-19T01:07:14,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34043 2024-11-19T01:07:14,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34043 2024-11-19T01:07:14,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34043 2024-11-19T01:07:14,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34043 2024-11-19T01:07:14,632 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:07:14,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:07:14,635 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:07:14,635 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:07:14,635 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:07:14,636 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:07:14,640 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:07:14,643 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:07:14,644 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45903 2024-11-19T01:07:14,645 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45903 connecting to ZooKeeper ensemble=127.0.0.1:59946 2024-11-19T01:07:14,647 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:14,651 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:14,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459030x0, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:07:14,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459030x0, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:07:14,659 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:45903-0x101088886dd0001 connected 2024-11-19T01:07:14,664 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:07:14,672 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:07:14,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T01:07:14,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:07:14,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45903 2024-11-19T01:07:14,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45903 2024-11-19T01:07:14,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45903 2024-11-19T01:07:14,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45903 2024-11-19T01:07:14,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45903 2024-11-19T01:07:14,700 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5134ffc85563:34043 2024-11-19T01:07:14,701 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5134ffc85563,34043,1731978433911 2024-11-19T01:07:14,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:07:14,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:07:14,710 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5134ffc85563,34043,1731978433911 2024-11-19T01:07:14,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T01:07:14,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:14,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:14,731 DEBUG 
[master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:07:14,733 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5134ffc85563,34043,1731978433911 from backup master directory 2024-11-19T01:07:14,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5134ffc85563,34043,1731978433911 2024-11-19T01:07:14,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:07:14,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:07:14,737 WARN [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T01:07:14,737 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5134ffc85563,34043,1731978433911 2024-11-19T01:07:14,739 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-19T01:07:14,742 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-19T01:07:14,798 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase.id] with ID: b3f79b74-49b0-4d34-97ea-848009346fa9 2024-11-19T01:07:14,799 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/.tmp/hbase.id 2024-11-19T01:07:14,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:07:14,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:07:14,812 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/.tmp/hbase.id]:[hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase.id] 2024-11-19T01:07:14,856 INFO [master/5134ffc85563:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:14,862 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-19T01:07:14,881 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-19T01:07:14,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:14,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:14,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:07:14,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:07:14,917 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:07:14,919 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T01:07:14,925 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:07:14,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:07:14,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:07:14,976 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store 2024-11-19T01:07:14,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:07:14,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:07:15,001 INFO [master/5134ffc85563:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-19T01:07:15,004 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:07:15,005 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:07:15,006 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:07:15,006 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:07:15,007 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:07:15,007 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:07:15,007 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
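The entries around here show the master bootstrapping its local 'master:store' region with an FSHLog-backed write-ahead log: WALFactory instantiates FSHLogProvider, and the AbstractFSWAL entry that follows reports blocksize=256 MB, rollsize=128 MB. Since this test is TestLogRolling, the roll size is the quantity of interest: by default it is the WAL block size times a roll multiplier of 0.5, which is consistent with the 256 MB / 128 MB pair in the log. A small sketch of that arithmetic, using the standard config keys as an assumption about how these values were derived:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long blockSize = 256L * 1024 * 1024;                          // 256 MB WAL block size
    conf.setLong("hbase.regionserver.hlog.blocksize", blockSize);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    long rollSize = (long) (blockSize
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("WAL rolls at ~" + rollSize + " bytes");   // 134217728 = 128 MB
  }
}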
2024-11-19T01:07:15,008 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978435005Disabling compacts and flushes for region at 1731978435005Disabling writes for close at 1731978435007 (+2 ms)Writing region close event to WAL at 1731978435007Closed at 1731978435007 2024-11-19T01:07:15,011 WARN [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/.initializing 2024-11-19T01:07:15,011 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/WALs/5134ffc85563,34043,1731978433911 2024-11-19T01:07:15,033 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C34043%2C1731978433911, suffix=, logDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/WALs/5134ffc85563,34043,1731978433911, archiveDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/oldWALs, maxLogs=10 2024-11-19T01:07:15,042 INFO [master/5134ffc85563:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C34043%2C1731978433911.1731978435038 2024-11-19T01:07:15,061 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/WALs/5134ffc85563,34043,1731978433911/5134ffc85563%2C34043%2C1731978433911.1731978435038 2024-11-19T01:07:15,071 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:07:15,073 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:07:15,073 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:07:15,077 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,078 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T01:07:15,148 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:15,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T01:07:15,155 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:07:15,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T01:07:15,159 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:07:15,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,163 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T01:07:15,163 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:07:15,164 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,168 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,169 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,174 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,174 DEBUG [master/5134ffc85563:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,178 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T01:07:15,181 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:07:15,187 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:07:15,188 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877874, jitterRate=0.1162751168012619}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T01:07:15,194 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731978435090Initializing all the Stores at 1731978435092 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978435093 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978435093Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978435094 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978435094Cleaning up temporary data from old regions at 1731978435175 (+81 ms)Region opened successfully at 1731978435194 (+19 ms) 2024-11-19T01:07:15,196 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T01:07:15,231 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33968c1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:07:15,264 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T01:07:15,276 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T01:07:15,276 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T01:07:15,280 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T01:07:15,281 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-19T01:07:15,286 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-19T01:07:15,286 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T01:07:15,312 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T01:07:15,320 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T01:07:15,322 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T01:07:15,325 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T01:07:15,327 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T01:07:15,328 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T01:07:15,330 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T01:07:15,335 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T01:07:15,336 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T01:07:15,338 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T01:07:15,339 
DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T01:07:15,356 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T01:07:15,358 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T01:07:15,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:07:15,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:07:15,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:15,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:15,364 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5134ffc85563,34043,1731978433911, sessionid=0x101088886dd0000, setting cluster-up flag (Was=false) 2024-11-19T01:07:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:15,383 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T01:07:15,388 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,34043,1731978433911 2024-11-19T01:07:15,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:15,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:15,399 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T01:07:15,400 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,34043,1731978433911 2024-11-19T01:07:15,407 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T01:07:15,486 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T01:07:15,488 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(746): ClusterId : b3f79b74-49b0-4d34-97ea-848009346fa9 2024-11-19T01:07:15,491 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:07:15,496 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:07:15,497 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T01:07:15,499 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T01:07:15,500 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:07:15,501 DEBUG [RS:0;5134ffc85563:45903 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f440885, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:07:15,507 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
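The StochasticLoadBalancer parameters the master echoes just above (maxSteps, stepsPerRegion, maxRunningTime, runMaxSteps) are plain configuration values. A hedged sketch of setting them; the key names are stated from memory of the balancer's documented settings and should be checked against the HBase release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);    // maxSteps=1000000
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);    // stepsPerRegion=800
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000); // maxRunningTime=30000 ms
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false); // runMaxSteps=false
    return conf;
  }
}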
2024-11-19T01:07:15,512 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5134ffc85563,34043,1731978433911 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T01:07:15,520 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:07:15,520 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:07:15,520 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:07:15,520 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:07:15,520 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5134ffc85563:0, corePoolSize=10, maxPoolSize=10 2024-11-19T01:07:15,521 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,521 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:07:15,521 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,522 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731978465522 2024-11-19T01:07:15,523 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5134ffc85563:45903 2024-11-19T01:07:15,524 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T01:07:15,526 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T01:07:15,526 INFO [RS:0;5134ffc85563:45903 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:07:15,526 INFO [RS:0;5134ffc85563:45903 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:07:15,527 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(832): About to register with Master. 
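The znode probes logged a little earlier (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge, /hbase/snapshot-cleanup) amount to existence checks against the quorum, where an absent node is expected rather than an error. A bare-bones equivalent with the plain ZooKeeper client might look like this; the connect string and paths are taken from the log, everything else is illustrative.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address as reported in the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59946", 30_000, event -> { });
    try {
      for (String path : new String[] {"/hbase/balancer", "/hbase/normalizer",
          "/hbase/switch/split", "/hbase/switch/merge", "/hbase/snapshot-cleanup"}) {
        Stat stat = zk.exists(path, false); // null means the node does not exist (not an error)
        System.out.println(path + " -> " + (stat == null ? "absent" : "present"));
      }
    } finally {
      zk.close();
    }
  }
}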
2024-11-19T01:07:15,527 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:07:15,527 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T01:07:15,529 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,34043,1731978433911 with port=45903, startcode=1731978434585 2024-11-19T01:07:15,530 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T01:07:15,530 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T01:07:15,530 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T01:07:15,531 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T01:07:15,531 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,534 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T01:07:15,534 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,535 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T01:07:15,535 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T01:07:15,536 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T01:07:15,538 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T01:07:15,539 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T01:07:15,541 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978435540,5,FailOnTimeoutGroup] 2024-11-19T01:07:15,543 DEBUG [RS:0;5134ffc85563:45903 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:07:15,545 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978435541,5,FailOnTimeoutGroup] 2024-11-19T01:07:15,545 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,545 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T01:07:15,546 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,547 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
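The "Chore ScheduledChore name=..., period=..., is enabled" lines above come from the ChoreService scheduling periodic tasks such as LogsCleaner and HFileCleaner. A minimal sketch of that pattern follows; it assumes the ChoreService/ScheduledChore/Stoppable classes from the HBase common module with the constructors as I recall them, so treat it as an outline rather than a verified example.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Periodic task, analogous to the LogsCleaner chore above (period in milliseconds).
    ScheduledChore chore = new ScheduledChore("ExampleCleaner", stopper, 600_000) {
      @Override protected void chore() {
        // cleanup work would go here
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);
    Thread.sleep(1_000);
    service.shutdown();
  }
}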
2024-11-19T01:07:15,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:07:15,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:07:15,554 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T01:07:15,554 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38 2024-11-19T01:07:15,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:07:15,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:07:15,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:07:15,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:07:15,577 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:07:15,577 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:15,579 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:07:15,581 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:07:15,582 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:15,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:07:15,586 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:07:15,586 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:15,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:07:15,590 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:07:15,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:15,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:15,593 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:07:15,595 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740 2024-11-19T01:07:15,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740 2024-11-19T01:07:15,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:07:15,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:07:15,601 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
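The FlushLargeStoresPolicy messages above note that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the policy falls back to memstore-flush-size divided by the number of families. If an explicit lower bound were wanted, it could be attached to the descriptor as a table attribute; a hedged sketch, with an arbitrary 16 MB value chosen purely for illustration:

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushPolicySketch {
  public static TableDescriptor withExplicitLowerBound(TableDescriptor base) {
    // Per-column-family flush threshold in bytes, stored as a table attribute.
    return TableDescriptorBuilder.newBuilder(base)
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}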
2024-11-19T01:07:15,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:07:15,607 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:07:15,608 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847401, jitterRate=0.0775265246629715}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:07:15,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731978435571Initializing all the Stores at 1731978435573 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978435573Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978435573Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978435573Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978435573Cleaning up temporary data from old regions at 1731978435599 (+26 ms)Region opened successfully at 1731978435612 (+13 ms) 2024-11-19T01:07:15,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:07:15,612 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:07:15,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:07:15,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:07:15,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:07:15,614 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:07:15,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978435612Disabling compacts and flushes for region at 1731978435612Disabling writes for close at 1731978435612Writing region close 
event to WAL at 1731978435613 (+1 ms)Closed at 1731978435613 2024-11-19T01:07:15,617 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:07:15,617 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T01:07:15,619 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47201, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:07:15,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T01:07:15,627 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34043 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,45903,1731978434585 2024-11-19T01:07:15,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34043 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,45903,1731978434585 2024-11-19T01:07:15,634 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:07:15,637 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T01:07:15,645 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38 2024-11-19T01:07:15,646 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39161 2024-11-19T01:07:15,646 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:07:15,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:07:15,650 DEBUG [RS:0;5134ffc85563:45903 {}] zookeeper.ZKUtil(111): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,45903,1731978434585 2024-11-19T01:07:15,651 WARN [RS:0;5134ffc85563:45903 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
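Once the InitMetaProcedure above has assigned hbase:meta, the catalog can be read from client code like any other table. A minimal sketch, assuming a running cluster reachable through the usual client configuration; nothing here is specific to this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class MetaScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(new Scan())) {
      for (Result row : scanner) {
        System.out.println(row); // one row per region, keyed by region name
      }
    }
  }
}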
2024-11-19T01:07:15,651 INFO [RS:0;5134ffc85563:45903 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:07:15,651 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585 2024-11-19T01:07:15,653 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,45903,1731978434585] 2024-11-19T01:07:15,678 INFO [RS:0;5134ffc85563:45903 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:07:15,689 INFO [RS:0;5134ffc85563:45903 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:07:15,694 INFO [RS:0;5134ffc85563:45903 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:07:15,694 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,695 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:07:15,701 INFO [RS:0;5134ffc85563:45903 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:07:15,703 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,703 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,703 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,703 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,703 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,704 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,704 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:07:15,704 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,704 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,704 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T01:07:15,704 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,705 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,705 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:07:15,705 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:07:15,705 DEBUG [RS:0;5134ffc85563:45903 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:07:15,706 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,706 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,706 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,706 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,706 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,706 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,45903,1731978434585-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:07:15,724 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:07:15,726 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,45903,1731978434585-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,727 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:15,727 INFO [RS:0;5134ffc85563:45903 {}] regionserver.Replication(171): 5134ffc85563,45903,1731978434585 started 2024-11-19T01:07:15,750 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
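The MemStoreFlusher and PressureAwareCompactionThroughputController lines above are driven by region-server configuration. A rough sketch of the relevant knobs: hbase.regionserver.global.memstore.size is a well-known key, while the two compaction-throughput key names are assumptions from memory and should be verified against the release in use before relying on them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the heap usable by all memstores; the logged globalMemStoreLimit derives from this.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Compaction throughput bounds in bytes/sec (assumed key names).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}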
2024-11-19T01:07:15,750 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,45903,1731978434585, RpcServer on 5134ffc85563/172.17.0.2:45903, sessionid=0x101088886dd0001 2024-11-19T01:07:15,751 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:07:15,751 DEBUG [RS:0;5134ffc85563:45903 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,45903,1731978434585 2024-11-19T01:07:15,752 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,45903,1731978434585' 2024-11-19T01:07:15,752 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:07:15,753 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:07:15,754 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:07:15,754 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:07:15,754 DEBUG [RS:0;5134ffc85563:45903 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,45903,1731978434585 2024-11-19T01:07:15,754 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,45903,1731978434585' 2024-11-19T01:07:15,754 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:07:15,755 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:07:15,756 DEBUG [RS:0;5134ffc85563:45903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:07:15,756 INFO [RS:0;5134ffc85563:45903 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:07:15,756 INFO [RS:0;5134ffc85563:45903 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T01:07:15,788 WARN [5134ffc85563:34043 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
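Quota support is reported as disabled just above; it is off unless explicitly switched on. A one-line, illustrative sketch of enabling it via the boolean switch (to take effect, the setting would need to be present before the cluster starts):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaSwitchSketch {
  public static Configuration withQuotas() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true); // enables the RPC/space quota managers at startup
    return conf;
  }
}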
2024-11-19T01:07:15,864 INFO [RS:0;5134ffc85563:45903 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C45903%2C1731978434585, suffix=, logDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585, archiveDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs, maxLogs=32 2024-11-19T01:07:15,867 INFO [RS:0;5134ffc85563:45903 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978435867 2024-11-19T01:07:15,876 INFO [RS:0;5134ffc85563:45903 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978435867 2024-11-19T01:07:15,878 DEBUG [RS:0;5134ffc85563:45903 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:07:16,040 DEBUG [5134ffc85563:34043 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T01:07:16,053 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5134ffc85563,45903,1731978434585 2024-11-19T01:07:16,059 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,45903,1731978434585, state=OPENING 2024-11-19T01:07:16,065 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T01:07:16,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:16,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:07:16,067 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:07:16,067 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:07:16,069 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:07:16,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,45903,1731978434585}] 2024-11-19T01:07:16,246 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T01:07:16,250 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51015, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T01:07:16,260 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T01:07:16,261 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:07:16,264 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C45903%2C1731978434585.meta, suffix=.meta, logDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585, archiveDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs, maxLogs=32 2024-11-19T01:07:16,266 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.meta.1731978436266.meta 2024-11-19T01:07:16,274 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.meta.1731978436266.meta 2024-11-19T01:07:16,275 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43475:43475),(127.0.0.1/127.0.0.1:41887:41887)] 2024-11-19T01:07:16,278 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:07:16,279 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T01:07:16,282 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T01:07:16,287 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T01:07:16,292 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T01:07:16,292 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:07:16,293 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T01:07:16,293 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T01:07:16,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:07:16,298 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:07:16,298 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:16,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:16,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:07:16,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:07:16,301 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:16,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:16,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:07:16,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:07:16,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:16,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:07:16,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:07:16,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:07:16,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:16,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T01:07:16,308 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:07:16,309 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740 2024-11-19T01:07:16,312 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740 2024-11-19T01:07:16,314 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:07:16,314 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:07:16,315 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:07:16,318 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:07:16,319 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781481, jitterRate=-0.0062959641218185425}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:07:16,319 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T01:07:16,321 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731978436293Writing region info on filesystem at 1731978436294 (+1 ms)Initializing all the Stores at 1731978436295 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978436295Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978436296 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978436296Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978436296Cleaning up temporary data from old regions at 1731978436314 (+18 ms)Running coprocessor post-open hooks at 1731978436319 (+5 ms)Region opened successfully at 1731978436321 (+2 ms) 2024-11-19T01:07:16,329 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731978436237 2024-11-19T01:07:16,343 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T01:07:16,344 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T01:07:16,346 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,45903,1731978434585 2024-11-19T01:07:16,348 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,45903,1731978434585, state=OPEN 2024-11-19T01:07:16,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:07:16,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:07:16,353 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:07:16,354 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:07:16,354 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5134ffc85563,45903,1731978434585 2024-11-19T01:07:16,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T01:07:16,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,45903,1731978434585 in 284 msec 2024-11-19T01:07:16,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T01:07:16,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 739 msec 2024-11-19T01:07:16,371 DEBUG [PEWorker-5 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:07:16,371 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T01:07:16,398 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:07:16,399 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,45903,1731978434585, seqNum=-1] 2024-11-19T01:07:16,422 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:07:16,424 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48275, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:07:16,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0120 sec 2024-11-19T01:07:16,450 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731978436450, completionTime=-1 2024-11-19T01:07:16,454 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T01:07:16,455 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T01:07:16,488 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T01:07:16,488 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731978496488 2024-11-19T01:07:16,488 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731978556488 2024-11-19T01:07:16,488 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-11-19T01:07:16,492 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34043,1731978433911-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:16,493 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34043,1731978433911-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:16,493 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34043,1731978433911-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:16,495 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5134ffc85563:34043, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:07:16,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:16,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T01:07:16,502 DEBUG [master/5134ffc85563:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T01:07:16,524 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.787sec 2024-11-19T01:07:16,525 INFO [master/5134ffc85563:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T01:07:16,527 INFO [master/5134ffc85563:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T01:07:16,528 INFO [master/5134ffc85563:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T01:07:16,528 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T01:07:16,528 INFO [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T01:07:16,529 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34043,1731978433911-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:07:16,530 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34043,1731978433911-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T01:07:16,538 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T01:07:16,539 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T01:07:16,540 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34043,1731978433911-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
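The hbase:meta region opened above carries four column families (info, ns, rep_barrier and table). One way to confirm that layout from client code is to fetch the meta table's descriptor through the Admin API. The sketch below is illustrative only, not part of the test: it assumes an hbase-site.xml on the classpath pointing at this cluster (the ZooKeeper quorum 127.0.0.1:59946 seen in this log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescribeMeta {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml on the classpath points at the running cluster;
    // otherwise set hbase.zookeeper.quorum explicitly on the Configuration.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      // Prints the same families the region server instantiated above:
      // info, ns, rep_barrier, table.
      for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
        System.out.println(cf.getNameAsString() + " versions=" + cf.getMaxVersions());
      }
    }
  }
}
```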
2024-11-19T01:07:16,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64202c07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:07:16,606 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-19T01:07:16,606 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-19T01:07:16,611 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5134ffc85563,34043,-1 for getting cluster id 2024-11-19T01:07:16,615 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T01:07:16,624 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b3f79b74-49b0-4d34-97ea-848009346fa9' 2024-11-19T01:07:16,627 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T01:07:16,627 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b3f79b74-49b0-4d34-97ea-848009346fa9" 2024-11-19T01:07:16,629 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5879d47e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:07:16,630 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5134ffc85563,34043,-1] 2024-11-19T01:07:16,633 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T01:07:16,635 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:07:16,636 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46230, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T01:07:16,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@717eca08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:07:16,640 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:07:16,648 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,45903,1731978434585, seqNum=-1] 2024-11-19T01:07:16,649 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:07:16,651 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37120, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:07:16,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=5134ffc85563,34043,1731978433911 2024-11-19T01:07:16,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:07:16,682 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T01:07:16,686 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T01:07:16,691 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5134ffc85563,34043,1731978433911 2024-11-19T01:07:16,694 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3a95d6f2 2024-11-19T01:07:16,695 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T01:07:16,698 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46246, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T01:07:16,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34043 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T01:07:16,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34043 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
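The two TableDescriptorChecker warnings above come from the deliberately small test sizes: hbase.hregion.max.filesize resolves to 786432 bytes and hbase.hregion.memstore.flush.size to 8192 bytes, so the table splits and flushes very frequently. The create request logged next (table TestLogRolling-testSlowSyncLogRolling with a single 'info' family, VERSIONS=1, BLOOMFILTER=ROW) corresponds roughly to what an ordinary client would issue as sketched below. This is an illustrative equivalent, not the test's own code; here the small sizes are set on the table descriptor for demonstration, whereas in this run they appear to come from the cluster configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSlowSyncTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          // Single 'info' family, one version, ROW bloom filter, as in the log.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build())
          // Tiny sizes so splits and flushes happen quickly; values this small
          // trip the TableDescriptorChecker warnings seen above.
          .setMaxFileSize(786432L)
          .setMemStoreFlushSize(8192L)
          .build();
      admin.createTable(td);
    }
  }
}
```

The CreateTableProcedure entries that follow (pid=4 and its sub-procedures) show the master-side work this one client call triggers.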
2024-11-19T01:07:16,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:07:16,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34043 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-19T01:07:16,716 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T01:07:16,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-19T01:07:16,719 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:16,721 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T01:07:16,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:07:16,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741835_1011 (size=389) 2024-11-19T01:07:16,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741835_1011 (size=389) 2024-11-19T01:07:16,778 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ea03a7d16151cbebdef478ed8948cbe5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38 2024-11-19T01:07:16,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741836_1012 (size=72) 2024-11-19T01:07:16,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741836_1012 (size=72) 2024-11-19T01:07:16,811 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:07:16,811 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ea03a7d16151cbebdef478ed8948cbe5, disabling compactions & flushes 2024-11-19T01:07:16,811 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:07:16,811 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:07:16,811 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. after waiting 0 ms 2024-11-19T01:07:16,811 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:07:16,811 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:07:16,811 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ea03a7d16151cbebdef478ed8948cbe5: Waiting for close lock at 1731978436811Disabling compacts and flushes for region at 1731978436811Disabling writes for close at 1731978436811Writing region close event to WAL at 1731978436811Closed at 1731978436811 2024-11-19T01:07:16,814 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T01:07:16,819 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731978436814"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731978436814"}]},"ts":"1731978436814"} 2024-11-19T01:07:16,825 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T01:07:16,827 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T01:07:16,830 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978436827"}]},"ts":"1731978436827"} 2024-11-19T01:07:16,839 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-19T01:07:16,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea03a7d16151cbebdef478ed8948cbe5, ASSIGN}] 2024-11-19T01:07:16,844 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea03a7d16151cbebdef478ed8948cbe5, ASSIGN 2024-11-19T01:07:16,848 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea03a7d16151cbebdef478ed8948cbe5, ASSIGN; state=OFFLINE, location=5134ffc85563,45903,1731978434585; forceNewPlan=false, retain=false 2024-11-19T01:07:16,999 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea03a7d16151cbebdef478ed8948cbe5, regionState=OPENING, regionLocation=5134ffc85563,45903,1731978434585 2024-11-19T01:07:17,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea03a7d16151cbebdef478ed8948cbe5, ASSIGN because future has completed 2024-11-19T01:07:17,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea03a7d16151cbebdef478ed8948cbe5, server=5134ffc85563,45903,1731978434585}] 2024-11-19T01:07:17,169 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 
2024-11-19T01:07:17,170 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ea03a7d16151cbebdef478ed8948cbe5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:07:17,170 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,170 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:07:17,171 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,171 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,173 INFO [StoreOpener-ea03a7d16151cbebdef478ed8948cbe5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,176 INFO [StoreOpener-ea03a7d16151cbebdef478ed8948cbe5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea03a7d16151cbebdef478ed8948cbe5 columnFamilyName info 2024-11-19T01:07:17,176 DEBUG [StoreOpener-ea03a7d16151cbebdef478ed8948cbe5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:07:17,177 INFO [StoreOpener-ea03a7d16151cbebdef478ed8948cbe5-1 {}] regionserver.HStore(327): Store=ea03a7d16151cbebdef478ed8948cbe5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:07:17,178 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,179 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,180 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,181 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,181 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,183 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,187 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:07:17,188 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ea03a7d16151cbebdef478ed8948cbe5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706799, jitterRate=-0.1012592762708664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T01:07:17,188 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:17,189 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ea03a7d16151cbebdef478ed8948cbe5: Running coprocessor pre-open hook at 1731978437171Writing region info on filesystem at 1731978437171Initializing all the Stores at 1731978437173 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978437173Cleaning up temporary data from old regions at 1731978437181 (+8 ms)Running coprocessor post-open hooks at 1731978437188 (+7 ms)Region opened successfully at 1731978437189 (+1 ms) 2024-11-19T01:07:17,191 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5., pid=6, masterSystemTime=1731978437161 2024-11-19T01:07:17,195 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:07:17,195 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:07:17,197 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea03a7d16151cbebdef478ed8948cbe5, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,45903,1731978434585 2024-11-19T01:07:17,200 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea03a7d16151cbebdef478ed8948cbe5, server=5134ffc85563,45903,1731978434585 because future has completed 2024-11-19T01:07:17,206 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T01:07:17,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ea03a7d16151cbebdef478ed8948cbe5, server=5134ffc85563,45903,1731978434585 in 196 msec 2024-11-19T01:07:17,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T01:07:17,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea03a7d16151cbebdef478ed8948cbe5, ASSIGN in 365 msec 2024-11-19T01:07:17,212 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T01:07:17,213 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978437213"}]},"ts":"1731978437213"} 2024-11-19T01:07:17,216 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-19T01:07:17,218 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T01:07:17,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 511 msec 2024-11-19T01:07:21,796 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T01:07:21,851 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T01:07:21,853 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-19T01:07:24,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T01:07:24,320 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T01:07:24,322 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T01:07:24,322 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T01:07:24,322 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:07:24,323 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T01:07:24,323 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T01:07:24,323 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T01:07:26,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:07:26,756 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-19T01:07:26,759 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-19T01:07:26,766 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-19T01:07:26,767 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 
2024-11-19T01:07:26,769 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978446768 2024-11-19T01:07:26,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:26,779 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:26,779 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:26,779 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:26,779 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:26,780 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978435867 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978446768 2024-11-19T01:07:26,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741833_1009 (size=451) 2024-11-19T01:07:26,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741833_1009 (size=451) 2024-11-19T01:07:26,789 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978435867 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs/5134ffc85563%2C45903%2C1731978434585.1731978435867 2024-11-19T01:07:26,789 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:07:26,799 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5., hostname=5134ffc85563,45903,1731978434585, seqNum=2] 2024-11-19T01:07:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45903 {}] regionserver.HRegion(8855): Flush requested on ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:38,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea03a7d16151cbebdef478ed8948cbe5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:07:38,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/11d62be9c33a4bbb86be0b59afa53131 is 1080, key is row0001/info:/1731978446802/Put/seqid=0 2024-11-19T01:07:38,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741838_1014 (size=12509) 2024-11-19T01:07:38,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741838_1014 (size=12509) 2024-11-19T01:07:38,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/11d62be9c33a4bbb86be0b59afa53131 2024-11-19T01:07:38,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/11d62be9c33a4bbb86be0b59afa53131 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131 2024-11-19T01:07:39,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T01:07:39,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea03a7d16151cbebdef478ed8948cbe5 in 170ms, sequenceid=11, compaction requested=false 2024-11-19T01:07:39,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea03a7d16151cbebdef478ed8948cbe5: 2024-11-19T01:07:43,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T01:07:46,854 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978466853 2024-11-19T01:07:47,064 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:47,065 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:47,065 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:47,065 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:47,065 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:47,065 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:47,066 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978446768 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978466853 2024-11-19T01:07:47,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741837_1013 (size=12399) 2024-11-19T01:07:47,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741837_1013 (size=12399) 2024-11-19T01:07:47,073 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 
2024-11-19T01:07:47,276 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:49,480 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:51,685 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:53,889 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:53,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45903 {}] regionserver.HRegion(8855): Flush requested on ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:07:53,890 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea03a7d16151cbebdef478ed8948cbe5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:07:54,091 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:54,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/e392b680a3d648d9b6098bfb34dd2405 is 1080, key is row0008/info:/1731978460843/Put/seqid=0 2024-11-19T01:07:54,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741840_1016 (size=12509) 2024-11-19T01:07:54,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741840_1016 (size=12509) 2024-11-19T01:07:54,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/e392b680a3d648d9b6098bfb34dd2405 2024-11-19T01:07:54,123 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/e392b680a3d648d9b6098bfb34dd2405 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/e392b680a3d648d9b6098bfb34dd2405 2024-11-19T01:07:54,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/e392b680a3d648d9b6098bfb34dd2405, entries=7, sequenceid=21, filesize=12.2 K 2024-11-19T01:07:54,335 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:54,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea03a7d16151cbebdef478ed8948cbe5 in 446ms, sequenceid=21, compaction requested=false 2024-11-19T01:07:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea03a7d16151cbebdef478ed8948cbe5: 2024-11-19T01:07:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-19T01:07:54,336 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:07:54,337 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131 because midkey is the same as first or last row 2024-11-19T01:07:56,094 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:56,576 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T01:07:56,576 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
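[Editor's note] The split-policy entries just above state their inputs directly: the two ~12.2 K hfiles flushed so far sum to 24.4 K, which exceeds sizeToCheck=16.0 K, yet the split is then vetoed because the store file's midkey equals its first or last row. A much-simplified sketch of that size check, using the byte sizes logged for the two hfiles (this is only the arithmetic the log reports, not HBase's ConstantSizeRegionSplitPolicy or IncreasingToUpperBoundRegionSplitPolicy):

public class SplitSizeCheckSketch {
    // Region "size" here is just the sum of the store's hfile sizes, as the log sums them.
    static boolean shouldSplit(long[] storeFileSizesBytes, long sizeToCheckBytes) {
        long sumSize = 0;
        for (long s : storeFileSizesBytes) {
            sumSize += s;
        }
        return sumSize > sizeToCheckBytes;
    }

    public static void main(String[] args) {
        // Two 12,509-byte hfiles from the flushes above: 25,018 bytes, i.e. ~24.4 K.
        long[] hfiles = {12_509, 12_509};
        long sizeToCheck = 16 * 1024;                          // "sizeToCheck=16.0 K"
        System.out.println(shouldSplit(hfiles, sizeToCheck));  // true: "Should split because region size is big enough"
        // The split is still vetoed afterwards because the midkey equals the first/last row.
    }
}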
2024-11-19T01:07:58,298 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:58,300 WARN [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:58,301 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C45903%2C1731978434585:(num 1731978466853) roll requested 2024-11-19T01:07:58,301 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978478301 2024-11-19T01:07:58,509 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:07:58,510 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:58,510 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:58,510 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:58,510 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:58,510 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:07:58,510 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978466853 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978478301 2024-11-19T01:07:58,511 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:07:58,511 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978466853 is not closed yet, will try archiving it next time 2024-11-19T01:07:58,512 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978446768 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs/5134ffc85563%2C45903%2C1731978434585.1731978446768 2024-11-19T01:07:58,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741839_1015 (size=7739) 2024-11-19T01:07:58,513 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741839_1015 (size=7739) 2024-11-19T01:08:00,502 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:02,171 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ea03a7d16151cbebdef478ed8948cbe5, had cached 0 bytes from a total of 25018 2024-11-19T01:08:02,706 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:04,910 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:07,115 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:09,117 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T01:08:09,117 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978489117 2024-11-19T01:08:13,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
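[Editor's note] Two distinct roll triggers appear in this stretch of the log: the WARN above requested a roll after too many slow syncs ("count=8, threshold=5"), and the entries that follow request one after a single sync exceeding a time threshold ("time=5005 ms, threshold=5000 ms"). A hedged sketch of that bookkeeping, not HBase's AbstractFSWAL; the 100 ms cutoff that marks an individual sync as "slow" is an assumption for the demo, not a value taken from this log:

public class SlowSyncRollSketch {
    static final long SLOW_SYNC_MS = 100;        // assumed cutoff marking one sync "slow" (not from this log)
    static final long ROLL_ON_SYNC_MS = 5000;    // "threshold=5000 ms" in the later entries
    static final int  ROLL_ON_SLOW_COUNT = 5;    // "threshold=5" in the WARN above

    private int slowSyncCount;

    /** Record one completed WAL sync; return true if a log roll should be requested. */
    boolean onSync(long syncCostMs) {
        if (syncCostMs > SLOW_SYNC_MS) {
            slowSyncCount++;                     // e.g. the repeated "Slow sync cost: 201 ms" entries
        }
        return syncCostMs > ROLL_ON_SYNC_MS || slowSyncCount > ROLL_ON_SLOW_COUNT;
    }

    /** Reset the count once the WAL has actually been rolled. */
    void onRoll() {
        slowSyncCount = 0;
    }

    public static void main(String[] args) {
        SlowSyncRollSketch wal = new SlowSyncRollSketch();
        boolean roll = false;
        for (int i = 0; i < 8 && !roll; i++) {
            roll = wal.onSync(201);              // repeated ~201 ms syncs, as logged
        }
        System.out.println("roll after repeated slow syncs: " + roll);          // true
        wal.onRoll();
        System.out.println("roll after one 5005 ms sync: " + wal.onSync(5005)); // true
    }
}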
2024-11-19T01:08:14,125 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:14,127 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:14,127 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C45903%2C1731978434585:(num 1731978489117) roll requested 2024-11-19T01:08:14,128 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:14,128 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:14,128 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:14,128 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:14,128 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:14,128 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978478301 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978489117 2024-11-19T01:08:14,129 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:08:14,129 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978478301 is not closed yet, will try archiving it next time 2024-11-19T01:08:14,130 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978494130 2024-11-19T01:08:14,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741841_1017 (size=4753) 2024-11-19T01:08:14,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741841_1017 (size=4753) 2024-11-19T01:08:19,133 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:19,133 WARN [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45903 {}] regionserver.HRegion(8855): Flush requested on ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:08:19,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea03a7d16151cbebdef478ed8948cbe5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:08:19,142 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:19,142 WARN [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:21,135 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T01:08:24,137 INFO [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:24,137 WARN [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:24,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:24,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:24,138 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:24,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:24,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:24,138 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978489117 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978494130 2024-11-19T01:08:24,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741842_1018 (size=1569) 2024-11-19T01:08:24,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741842_1018 (size=1569) 2024-11-19T01:08:24,142 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:08:24,143 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978489117 is not closed yet, will try archiving it next time 2024-11-19T01:08:24,143 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C45903%2C1731978434585:(num 1731978494130) roll requested 2024-11-19T01:08:24,143 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978504143 2024-11-19T01:08:24,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/4635a3d35f6947d183d1608ec9ce4123 is 1080, key is row0015/info:/1731978475892/Put/seqid=0 2024-11-19T01:08:24,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741844_1020 (size=12509) 2024-11-19T01:08:24,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741844_1020 (size=12509) 2024-11-19T01:08:24,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/4635a3d35f6947d183d1608ec9ce4123 2024-11-19T01:08:24,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/4635a3d35f6947d183d1608ec9ce4123 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/4635a3d35f6947d183d1608ec9ce4123 2024-11-19T01:08:24,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/4635a3d35f6947d183d1608ec9ce4123, entries=7, sequenceid=31, filesize=12.2 K 2024-11-19T01:08:29,151 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:29,151 WARN [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:29,172 INFO 
[FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:29,172 WARN [FSHLog-0-hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38-prefix:5134ffc85563,45903,1731978434585 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38171,DS-d5098713-ab33-4d3a-8b36-b5e5aa82a3d4,DISK], DatanodeInfoWithStorage[127.0.0.1:45823,DS-d869a2f4-c604-4854-80c3-b575a230e78e,DISK]] 2024-11-19T01:08:29,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea03a7d16151cbebdef478ed8948cbe5 in 10038ms, sequenceid=31, compaction requested=true 2024-11-19T01:08:29,172 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea03a7d16151cbebdef478ed8948cbe5: 2024-11-19T01:08:29,172 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,173 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,173 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-19T01:08:29,173 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:08:29,173 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,173 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131 because midkey is the same as first or last row 2024-11-19T01:08:29,173 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,173 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978494130 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978504143 2024-11-19T01:08:29,174 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:08:29,174 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978494130 is not closed yet, will try archiving it next time 2024-11-19T01:08:29,174 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978466853 
to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs/5134ffc85563%2C45903%2C1731978434585.1731978466853 2024-11-19T01:08:29,174 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C45903%2C1731978434585:(num 1731978504143) roll requested 2024-11-19T01:08:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ea03a7d16151cbebdef478ed8948cbe5:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:08:29,175 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978509175 2024-11-19T01:08:29,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741843_1019 (size=438) 2024-11-19T01:08:29,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741843_1019 (size=438) 2024-11-19T01:08:29,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:08:29,177 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978478301 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs/5134ffc85563%2C45903%2C1731978434585.1731978478301 2024-11-19T01:08:29,178 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:08:29,179 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978489117 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs/5134ffc85563%2C45903%2C1731978434585.1731978489117 2024-11-19T01:08:29,181 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978494130 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs/5134ffc85563%2C45903%2C1731978434585.1731978494130 2024-11-19T01:08:29,184 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:08:29,186 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.HStore(1541): ea03a7d16151cbebdef478ed8948cbe5/info is initiating minor compaction (all files) 2024-11-19T01:08:29,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,186 INFO [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ea03a7d16151cbebdef478ed8948cbe5/info in 
TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:08:29,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,187 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978504143 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978509175 2024-11-19T01:08:29,187 INFO [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131, hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/e392b680a3d648d9b6098bfb34dd2405, hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/4635a3d35f6947d183d1608ec9ce4123] into tmpdir=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp, totalSize=36.6 K 2024-11-19T01:08:29,188 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:43475:43475)] 2024-11-19T01:08:29,188 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978504143 is not closed yet, will try archiving it next time 2024-11-19T01:08:29,188 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C45903%2C1731978434585.1731978509188 2024-11-19T01:08:29,188 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] compactions.Compactor(225): Compacting 11d62be9c33a4bbb86be0b59afa53131, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731978446802 2024-11-19T01:08:29,190 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] compactions.Compactor(225): Compacting e392b680a3d648d9b6098bfb34dd2405, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731978460843 2024-11-19T01:08:29,191 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4635a3d35f6947d183d1608ec9ce4123, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731978475892 2024-11-19T01:08:29,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741845_1021 (size=93) 2024-11-19T01:08:29,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741845_1021 (size=93) 2024-11-19T01:08:29,203 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,203 INFO [sync.1 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,203 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,203 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,203 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:29,204 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978509175 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978509188 2024-11-19T01:08:29,205 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43475:43475),(127.0.0.1/127.0.0.1:41887:41887)] 2024-11-19T01:08:29,205 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978504143 is not closed yet, will try archiving it next time 2024-11-19T01:08:29,205 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978509175 is not closed yet, will try archiving it next time 2024-11-19T01:08:29,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741846_1022 (size=1258) 2024-11-19T01:08:29,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741846_1022 (size=1258) 2024-11-19T01:08:29,207 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978504143 is not closed yet, will try archiving it next time 2024-11-19T01:08:29,224 INFO [RS:0;5134ffc85563:45903-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ea03a7d16151cbebdef478ed8948cbe5#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:08:29,226 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/cc9267548b2e4fc082126901617aab75 is 1080, key is row0001/info:/1731978446802/Put/seqid=0 2024-11-19T01:08:29,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741848_1024 (size=27710) 2024-11-19T01:08:29,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741848_1024 (size=27710) 2024-11-19T01:08:29,248 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/cc9267548b2e4fc082126901617aab75 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/cc9267548b2e4fc082126901617aab75 2024-11-19T01:08:29,267 INFO [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ea03a7d16151cbebdef478ed8948cbe5/info of ea03a7d16151cbebdef478ed8948cbe5 into cc9267548b2e4fc082126901617aab75(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:08:29,267 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ea03a7d16151cbebdef478ed8948cbe5: 2024-11-19T01:08:29,269 INFO [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5., storeName=ea03a7d16151cbebdef478ed8948cbe5/info, priority=13, startTime=1731978509174; duration=0sec 2024-11-19T01:08:29,270 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T01:08:29,270 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:08:29,270 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/cc9267548b2e4fc082126901617aab75 because midkey is the same as first or last row 2024-11-19T01:08:29,270 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T01:08:29,270 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:08:29,271 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/cc9267548b2e4fc082126901617aab75 because midkey is the same as first or last row 2024-11-19T01:08:29,271 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T01:08:29,271 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:08:29,271 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/cc9267548b2e4fc082126901617aab75 because midkey is the same as first or last row 2024-11-19T01:08:29,271 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:08:29,271 DEBUG [RS:0;5134ffc85563:45903-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ea03a7d16151cbebdef478ed8948cbe5:info 2024-11-19T01:08:29,592 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/WALs/5134ffc85563,45903,1731978434585/5134ffc85563%2C45903%2C1731978434585.1731978504143 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs/5134ffc85563%2C45903%2C1731978434585.1731978504143 2024-11-19T01:08:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45903 {}] regionserver.HRegion(8855): Flush requested on ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:08:41,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea03a7d16151cbebdef478ed8948cbe5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:08:41,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/a15ccaafb38e449ba4ae71c9a103c2ea is 1080, key is row0022/info:/1731978509190/Put/seqid=0 2024-11-19T01:08:41,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741849_1025 (size=12509) 2024-11-19T01:08:41,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741849_1025 (size=12509) 2024-11-19T01:08:41,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/a15ccaafb38e449ba4ae71c9a103c2ea 2024-11-19T01:08:41,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/a15ccaafb38e449ba4ae71c9a103c2ea as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/a15ccaafb38e449ba4ae71c9a103c2ea 2024-11-19T01:08:41,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/a15ccaafb38e449ba4ae71c9a103c2ea, entries=7, sequenceid=42, filesize=12.2 K 2024-11-19T01:08:41,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea03a7d16151cbebdef478ed8948cbe5 in 38ms, sequenceid=42, compaction requested=false 2024-11-19T01:08:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea03a7d16151cbebdef478ed8948cbe5: 2024-11-19T01:08:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-19T01:08:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:08:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/cc9267548b2e4fc082126901617aab75 because midkey is the same as first or last row 2024-11-19T01:08:43,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T01:08:47,171 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ea03a7d16151cbebdef478ed8948cbe5, had cached 0 bytes from a total of 40219 2024-11-19T01:08:49,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T01:08:49,228 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
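[Editor's note] The sizes in the compaction and flush entries above add up exactly as logged: the three flushed hfiles (12,509 bytes each) give the 37,527-byte selection (~36.6 K), and the compacted file (27,710 bytes) plus the subsequent flush (12,509 bytes) give the 40,219-byte store total, i.e. the ~39.3 K sumSize and the "total of 40219" cache figure reported here. A trivial sketch of that bookkeeping (file names copied from the log; this is not HBase's ExploringCompactionPolicy):

import java.util.Map;

public class StoreSizeSketch {
    public static void main(String[] args) {
        // The three flushed hfiles selected for the minor compaction (sizes from the log).
        Map<String, Long> selected = Map.of(
            "11d62be9c33a4bbb86be0b59afa53131", 12_509L,
            "e392b680a3d648d9b6098bfb34dd2405", 12_509L,
            "4635a3d35f6947d183d1608ec9ce4123", 12_509L);
        long selectedBytes = selected.values().stream().mapToLong(Long::longValue).sum();
        System.out.println("selected 3 files of size " + selectedBytes);          // 37527

        // After the compaction the store holds the merged file plus the next flush.
        long storeBytes = 27_710L /* cc9267548b2e... */ + 12_509L /* a15ccaafb38e... */;
        System.out.printf("store total = %d bytes (~%.1f K)%n",
                          storeBytes, storeBytes / 1024.0);                       // 40219, ~39.3 K
    }
}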
2024-11-19T01:08:49,229 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:08:49,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:49,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:49,236 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
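[Editor's note] The call stack above is AbstractTestLogRolling.tearDown shutting the mini cluster down through HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that JUnit 4 lifecycle pattern; shutdownMiniCluster is the call visible in the stack trace, startMiniCluster is its usual counterpart, and the class and field names here are illustrative rather than copied from the test:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
    private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        testUtil.startMiniCluster();     // brings up HDFS, ZooKeeper, a master and a region server
    }

    @After
    public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster();  // the call seen in the stack trace above
    }
}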
2024-11-19T01:08:49,237 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T01:08:49,237 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2128600098, stopped=false 2024-11-19T01:08:49,237 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5134ffc85563,34043,1731978433911 2024-11-19T01:08:49,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:49,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:49,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:49,239 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:08:49,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:49,240 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T01:08:49,240 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:08:49,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:49,241 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,45903,1731978434585' ***** 2024-11-19T01:08:49,241 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:08:49,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:49,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:49,241 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:08:49,241 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:08:49,241 INFO [RS:0;5134ffc85563:45903 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:08:49,242 INFO [RS:0;5134ffc85563:45903 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:08:49,242 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(3091): Received CLOSE for ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:08:49,242 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,45903,1731978434585 2024-11-19T01:08:49,242 INFO [RS:0;5134ffc85563:45903 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:08:49,243 INFO [RS:0;5134ffc85563:45903 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5134ffc85563:45903. 
2024-11-19T01:08:49,243 DEBUG [RS:0;5134ffc85563:45903 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:08:49,243 DEBUG [RS:0;5134ffc85563:45903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:49,243 INFO [RS:0;5134ffc85563:45903 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T01:08:49,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ea03a7d16151cbebdef478ed8948cbe5, disabling compactions & flushes 2024-11-19T01:08:49,243 INFO [RS:0;5134ffc85563:45903 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:08:49,243 INFO [RS:0;5134ffc85563:45903 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T01:08:49,243 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:08:49,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:08:49,243 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T01:08:49,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. after waiting 0 ms 2024-11-19T01:08:49,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 
2024-11-19T01:08:49,244 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ea03a7d16151cbebdef478ed8948cbe5 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-19T01:08:49,244 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T01:08:49,244 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1325): Online Regions={ea03a7d16151cbebdef478ed8948cbe5=TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T01:08:49,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:08:49,244 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:08:49,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:08:49,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:08:49,244 DEBUG [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ea03a7d16151cbebdef478ed8948cbe5 2024-11-19T01:08:49,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:08:49,245 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-19T01:08:49,259 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/8194aa5a13a5497896ec109e8ac4a755 is 1080, key is row0029/info:/1731978523217/Put/seqid=0 2024-11-19T01:08:49,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741850_1026 (size=8193) 2024-11-19T01:08:49,275 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/info/4ca9d687a07949788571eb27dc54a00b is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5./info:regioninfo/1731978437196/Put/seqid=0 2024-11-19T01:08:49,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741850_1026 (size=8193) 2024-11-19T01:08:49,277 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), 
to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/8194aa5a13a5497896ec109e8ac4a755 2024-11-19T01:08:49,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741851_1027 (size=7016) 2024-11-19T01:08:49,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741851_1027 (size=7016) 2024-11-19T01:08:49,290 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/info/4ca9d687a07949788571eb27dc54a00b 2024-11-19T01:08:49,290 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/.tmp/info/8194aa5a13a5497896ec109e8ac4a755 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/8194aa5a13a5497896ec109e8ac4a755 2024-11-19T01:08:49,302 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/8194aa5a13a5497896ec109e8ac4a755, entries=3, sequenceid=48, filesize=8.0 K 2024-11-19T01:08:49,304 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ea03a7d16151cbebdef478ed8948cbe5 in 60ms, sequenceid=48, compaction requested=true 2024-11-19T01:08:49,304 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131, hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/e392b680a3d648d9b6098bfb34dd2405, hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/4635a3d35f6947d183d1608ec9ce4123] to archive 2024-11-19T01:08:49,308 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T01:08:49,312 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/11d62be9c33a4bbb86be0b59afa53131 2024-11-19T01:08:49,314 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/e392b680a3d648d9b6098bfb34dd2405 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/e392b680a3d648d9b6098bfb34dd2405 2024-11-19T01:08:49,316 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/4635a3d35f6947d183d1608ec9ce4123 to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/info/4635a3d35f6947d183d1608ec9ce4123 2024-11-19T01:08:49,324 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/ns/896e7b438de6484abc2f39bb094dcc2c is 43, key is default/ns:d/1731978436430/Put/seqid=0 2024-11-19T01:08:49,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741852_1028 (size=5153) 2024-11-19T01:08:49,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741852_1028 (size=5153) 2024-11-19T01:08:49,336 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/ns/896e7b438de6484abc2f39bb094dcc2c 2024-11-19T01:08:49,331 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5134ffc85563:34043 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T01:08:49,337 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [11d62be9c33a4bbb86be0b59afa53131=12509, e392b680a3d648d9b6098bfb34dd2405=12509, 4635a3d35f6947d183d1608ec9ce4123=12509] 2024-11-19T01:08:49,343 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/default/TestLogRolling-testSlowSyncLogRolling/ea03a7d16151cbebdef478ed8948cbe5/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-19T01:08:49,346 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 2024-11-19T01:08:49,347 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ea03a7d16151cbebdef478ed8948cbe5: Waiting for close lock at 1731978529242Running coprocessor pre-close hooks at 1731978529243 (+1 ms)Disabling compacts and flushes for region at 1731978529243Disabling writes for close at 1731978529243Obtaining lock to block concurrent updates at 1731978529244 (+1 ms)Preparing flush snapshotting stores in ea03a7d16151cbebdef478ed8948cbe5 at 1731978529244Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731978529244Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. at 1731978529246 (+2 ms)Flushing ea03a7d16151cbebdef478ed8948cbe5/info: creating writer at 1731978529246Flushing ea03a7d16151cbebdef478ed8948cbe5/info: appending metadata at 1731978529258 (+12 ms)Flushing ea03a7d16151cbebdef478ed8948cbe5/info: closing flushed file at 1731978529258Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e28aa71: reopening flushed file at 1731978529288 (+30 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ea03a7d16151cbebdef478ed8948cbe5 in 60ms, sequenceid=48, compaction requested=true at 1731978529304 (+16 ms)Writing region close event to WAL at 1731978529338 (+34 ms)Running coprocessor post-close hooks at 1731978529344 (+6 ms)Closed at 1731978529346 (+2 ms) 2024-11-19T01:08:49,347 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731978436700.ea03a7d16151cbebdef478ed8948cbe5. 
2024-11-19T01:08:49,364 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/table/d95fa208154248f999c1ab227a20da96 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731978437213/Put/seqid=0 2024-11-19T01:08:49,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741853_1029 (size=5396) 2024-11-19T01:08:49,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741853_1029 (size=5396) 2024-11-19T01:08:49,372 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/table/d95fa208154248f999c1ab227a20da96 2024-11-19T01:08:49,380 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/info/4ca9d687a07949788571eb27dc54a00b as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/info/4ca9d687a07949788571eb27dc54a00b 2024-11-19T01:08:49,387 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/info/4ca9d687a07949788571eb27dc54a00b, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T01:08:49,388 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/ns/896e7b438de6484abc2f39bb094dcc2c as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/ns/896e7b438de6484abc2f39bb094dcc2c 2024-11-19T01:08:49,394 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/ns/896e7b438de6484abc2f39bb094dcc2c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T01:08:49,395 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/.tmp/table/d95fa208154248f999c1ab227a20da96 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/table/d95fa208154248f999c1ab227a20da96 2024-11-19T01:08:49,402 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/table/d95fa208154248f999c1ab227a20da96, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T01:08:49,403 INFO 
[RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 159ms, sequenceid=11, compaction requested=false 2024-11-19T01:08:49,409 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T01:08:49,410 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:08:49,410 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:08:49,410 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978529244Running coprocessor pre-close hooks at 1731978529244Disabling compacts and flushes for region at 1731978529244Disabling writes for close at 1731978529244Obtaining lock to block concurrent updates at 1731978529245 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731978529245Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731978529245Flushing stores of hbase:meta,,1.1588230740 at 1731978529246 (+1 ms)Flushing 1588230740/info: creating writer at 1731978529247 (+1 ms)Flushing 1588230740/info: appending metadata at 1731978529274 (+27 ms)Flushing 1588230740/info: closing flushed file at 1731978529274Flushing 1588230740/ns: creating writer at 1731978529299 (+25 ms)Flushing 1588230740/ns: appending metadata at 1731978529323 (+24 ms)Flushing 1588230740/ns: closing flushed file at 1731978529323Flushing 1588230740/table: creating writer at 1731978529345 (+22 ms)Flushing 1588230740/table: appending metadata at 1731978529364 (+19 ms)Flushing 1588230740/table: closing flushed file at 1731978529364Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c25d3a4: reopening flushed file at 1731978529379 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51cf1fb0: reopening flushed file at 1731978529387 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7acb0ca8: reopening flushed file at 1731978529395 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 159ms, sequenceid=11, compaction requested=false at 1731978529403 (+8 ms)Writing region close event to WAL at 1731978529405 (+2 ms)Running coprocessor post-close hooks at 1731978529410 (+5 ms)Closed at 1731978529410 2024-11-19T01:08:49,411 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T01:08:49,445 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,45903,1731978434585; all regions closed. 
2024-11-19T01:08:49,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,446 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,446 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,447 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,447 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741834_1010 (size=3066) 2024-11-19T01:08:49,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741834_1010 (size=3066) 2024-11-19T01:08:49,453 DEBUG [RS:0;5134ffc85563:45903 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs 2024-11-19T01:08:49,453 INFO [RS:0;5134ffc85563:45903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C45903%2C1731978434585.meta:.meta(num 1731978436266) 2024-11-19T01:08:49,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,454 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,454 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,454 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,454 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741847_1023 (size=12695) 2024-11-19T01:08:49,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741847_1023 (size=12695) 2024-11-19T01:08:49,461 DEBUG [RS:0;5134ffc85563:45903 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/oldWALs 2024-11-19T01:08:49,461 INFO [RS:0;5134ffc85563:45903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C45903%2C1731978434585:(num 1731978509188) 2024-11-19T01:08:49,461 DEBUG [RS:0;5134ffc85563:45903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:49,461 INFO [RS:0;5134ffc85563:45903 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:08:49,461 INFO [RS:0;5134ffc85563:45903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:08:49,461 INFO [RS:0;5134ffc85563:45903 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T01:08:49,461 INFO [RS:0;5134ffc85563:45903 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:08:49,461 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:08:49,462 INFO [RS:0;5134ffc85563:45903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45903 2024-11-19T01:08:49,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:08:49,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,45903,1731978434585 2024-11-19T01:08:49,466 INFO [RS:0;5134ffc85563:45903 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:08:49,467 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,45903,1731978434585] 2024-11-19T01:08:49,469 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,45903,1731978434585 already deleted, retry=false 2024-11-19T01:08:49,469 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,45903,1731978434585 expired; onlineServers=0 2024-11-19T01:08:49,469 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5134ffc85563,34043,1731978433911' ***** 2024-11-19T01:08:49,469 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T01:08:49,469 INFO [M:0;5134ffc85563:34043 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:08:49,469 INFO [M:0;5134ffc85563:34043 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:08:49,469 DEBUG [M:0;5134ffc85563:34043 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T01:08:49,470 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T01:08:49,470 DEBUG [M:0;5134ffc85563:34043 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T01:08:49,470 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978435540 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978435540,5,FailOnTimeoutGroup] 2024-11-19T01:08:49,470 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978435541 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978435541,5,FailOnTimeoutGroup] 2024-11-19T01:08:49,470 INFO [M:0;5134ffc85563:34043 {}] hbase.ChoreService(370): Chore service for: master/5134ffc85563:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T01:08:49,470 INFO [M:0;5134ffc85563:34043 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:08:49,470 DEBUG [M:0;5134ffc85563:34043 {}] master.HMaster(1795): Stopping service threads 2024-11-19T01:08:49,470 INFO [M:0;5134ffc85563:34043 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T01:08:49,470 INFO [M:0;5134ffc85563:34043 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:08:49,471 INFO [M:0;5134ffc85563:34043 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T01:08:49,471 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T01:08:49,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T01:08:49,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:49,472 DEBUG [M:0;5134ffc85563:34043 {}] zookeeper.ZKUtil(347): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T01:08:49,472 WARN [M:0;5134ffc85563:34043 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T01:08:49,472 INFO [M:0;5134ffc85563:34043 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/.lastflushedseqids 2024-11-19T01:08:49,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741854_1030 (size=130) 2024-11-19T01:08:49,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741854_1030 (size=130) 2024-11-19T01:08:49,485 INFO [M:0;5134ffc85563:34043 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T01:08:49,485 INFO [M:0;5134ffc85563:34043 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T01:08:49,485 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:08:49,485 INFO [M:0;5134ffc85563:34043 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:49,485 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:49,485 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:08:49,485 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:49,485 INFO [M:0;5134ffc85563:34043 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-19T01:08:49,503 DEBUG [M:0;5134ffc85563:34043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6af96fb6b4084682afb06e0a086f462d is 82, key is hbase:meta,,1/info:regioninfo/1731978436345/Put/seqid=0 2024-11-19T01:08:49,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741855_1031 (size=5672) 2024-11-19T01:08:49,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741855_1031 (size=5672) 2024-11-19T01:08:49,509 INFO [M:0;5134ffc85563:34043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6af96fb6b4084682afb06e0a086f462d 2024-11-19T01:08:49,531 DEBUG [M:0;5134ffc85563:34043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab9c393c369a461ba552f71fb7a49dbc is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731978437220/Put/seqid=0 2024-11-19T01:08:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741856_1032 (size=6248) 2024-11-19T01:08:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741856_1032 (size=6248) 2024-11-19T01:08:49,537 INFO [M:0;5134ffc85563:34043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab9c393c369a461ba552f71fb7a49dbc 2024-11-19T01:08:49,543 INFO [M:0;5134ffc85563:34043 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ab9c393c369a461ba552f71fb7a49dbc 2024-11-19T01:08:49,559 DEBUG [M:0;5134ffc85563:34043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af1e5a59968942adae0ab5074f75f861 is 69, key is 5134ffc85563,45903,1731978434585/rs:state/1731978435633/Put/seqid=0 2024-11-19T01:08:49,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741857_1033 (size=5156) 2024-11-19T01:08:49,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741857_1033 (size=5156) 2024-11-19T01:08:49,565 INFO [M:0;5134ffc85563:34043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af1e5a59968942adae0ab5074f75f861 2024-11-19T01:08:49,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:49,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45903-0x101088886dd0001, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:49,569 INFO [RS:0;5134ffc85563:45903 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:08:49,569 INFO [RS:0;5134ffc85563:45903 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,45903,1731978434585; zookeeper connection closed. 2024-11-19T01:08:49,570 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@616030bb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@616030bb 2024-11-19T01:08:49,570 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T01:08:49,595 DEBUG [M:0;5134ffc85563:34043 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db4c252c12364f41b773fbbbee46d2e7 is 52, key is load_balancer_on/state:d/1731978436678/Put/seqid=0 2024-11-19T01:08:49,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741858_1034 (size=5056) 2024-11-19T01:08:49,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741858_1034 (size=5056) 2024-11-19T01:08:49,605 INFO [M:0;5134ffc85563:34043 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db4c252c12364f41b773fbbbee46d2e7 2024-11-19T01:08:49,613 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6af96fb6b4084682afb06e0a086f462d as 
hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6af96fb6b4084682afb06e0a086f462d 2024-11-19T01:08:49,621 INFO [M:0;5134ffc85563:34043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6af96fb6b4084682afb06e0a086f462d, entries=8, sequenceid=59, filesize=5.5 K 2024-11-19T01:08:49,622 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab9c393c369a461ba552f71fb7a49dbc as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab9c393c369a461ba552f71fb7a49dbc 2024-11-19T01:08:49,629 INFO [M:0;5134ffc85563:34043 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ab9c393c369a461ba552f71fb7a49dbc 2024-11-19T01:08:49,629 INFO [M:0;5134ffc85563:34043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab9c393c369a461ba552f71fb7a49dbc, entries=6, sequenceid=59, filesize=6.1 K 2024-11-19T01:08:49,630 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af1e5a59968942adae0ab5074f75f861 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/af1e5a59968942adae0ab5074f75f861 2024-11-19T01:08:49,638 INFO [M:0;5134ffc85563:34043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/af1e5a59968942adae0ab5074f75f861, entries=1, sequenceid=59, filesize=5.0 K 2024-11-19T01:08:49,639 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db4c252c12364f41b773fbbbee46d2e7 as hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db4c252c12364f41b773fbbbee46d2e7 2024-11-19T01:08:49,645 INFO [M:0;5134ffc85563:34043 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db4c252c12364f41b773fbbbee46d2e7, entries=1, sequenceid=59, filesize=4.9 K 2024-11-19T01:08:49,647 INFO [M:0;5134ffc85563:34043 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=59, compaction requested=false 2024-11-19T01:08:49,649 INFO [M:0;5134ffc85563:34043 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:08:49,649 DEBUG [M:0;5134ffc85563:34043 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978529485Disabling compacts and flushes for region at 1731978529485Disabling writes for close at 1731978529485Obtaining lock to block concurrent updates at 1731978529485Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731978529485Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731978529486 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731978529487 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731978529487Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731978529502 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731978529502Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731978529515 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731978529530 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731978529530Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731978529543 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731978529558 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731978529558Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731978529572 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731978529595 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731978529595Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cc24d98: reopening flushed file at 1731978529612 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@227dd7aa: reopening flushed file at 1731978529621 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@705fab8e: reopening flushed file at 1731978529629 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45ff68e2: reopening flushed file at 1731978529638 (+9 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=59, compaction requested=false at 1731978529647 (+9 ms)Writing region close event to WAL at 1731978529648 (+1 ms)Closed at 1731978529648 2024-11-19T01:08:49,650 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,650 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,650 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,650 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,650 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:49,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45823 is added to blk_1073741830_1006 (size=27985) 2024-11-19T01:08:49,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741830_1006 (size=27985) 2024-11-19T01:08:49,654 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:08:49,654 INFO [M:0;5134ffc85563:34043 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T01:08:49,654 INFO [M:0;5134ffc85563:34043 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34043 2024-11-19T01:08:49,654 INFO [M:0;5134ffc85563:34043 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:08:49,710 INFO [regionserver/5134ffc85563:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:08:49,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:49,756 INFO [M:0;5134ffc85563:34043 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:08:49,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34043-0x101088886dd0000, quorum=127.0.0.1:59946, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:49,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:49,764 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:08:49,764 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:08:49,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:08:49,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir/,STOPPED} 2024-11-19T01:08:49,767 WARN [BP-348894987-172.17.0.2-1731978430850 heartbeating to localhost/127.0.0.1:39161 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:08:49,767 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:08:49,767 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:08:49,767 WARN [BP-348894987-172.17.0.2-1731978430850 heartbeating to localhost/127.0.0.1:39161 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-348894987-172.17.0.2-1731978430850 (Datanode Uuid b8ca2166-9be6-456e-aa64-56cc6ec3bb13) service to localhost/127.0.0.1:39161 2024-11-19T01:08:49,769 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data3/current/BP-348894987-172.17.0.2-1731978430850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:49,769 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data4/current/BP-348894987-172.17.0.2-1731978430850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:49,770 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:08:49,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:49,772 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:08:49,772 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:08:49,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:08:49,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir/,STOPPED} 2024-11-19T01:08:49,774 WARN [BP-348894987-172.17.0.2-1731978430850 heartbeating to localhost/127.0.0.1:39161 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:08:49,774 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:08:49,774 WARN [BP-348894987-172.17.0.2-1731978430850 heartbeating to localhost/127.0.0.1:39161 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-348894987-172.17.0.2-1731978430850 (Datanode Uuid c89a3ffa-f785-4086-94ac-6ec8e19eb86f) service to localhost/127.0.0.1:39161 2024-11-19T01:08:49,774 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:08:49,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data1/current/BP-348894987-172.17.0.2-1731978430850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:49,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/cluster_d6304920-e7fc-4be0-8dd4-41eb67cface4/data/data2/current/BP-348894987-172.17.0.2-1731978430850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:49,776 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:08:49,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:08:49,786 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:08:49,786 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:08:49,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:08:49,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir/,STOPPED} 2024-11-19T01:08:49,796 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T01:08:49,829 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T01:08:49,839 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39161 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5134ffc85563:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3a0ff1da java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39161 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39161 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39161 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39161 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/5134ffc85563:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39161 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/5134ffc85563:0:becomeActiveMaster-MemStoreChunkPool 
Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39161 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39161 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=164 (was 194), ProcessCount=11 (was 11), AvailableMemoryMB=5036 (was 5224) 2024-11-19T01:08:49,846 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=164, ProcessCount=11, AvailableMemoryMB=5036 2024-11-19T01:08:49,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T01:08:49,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.log.dir so I do NOT create it in target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37 2024-11-19T01:08:49,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb9b2053-f291-f5b6-d5b0-1c719ad3b4e5/hadoop.tmp.dir so I do NOT create it in target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37 2024-11-19T01:08:49,847 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b, deleteOnExit=true 2024-11-19T01:08:49,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T01:08:49,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/test.cache.data in system properties and HBase conf 2024-11-19T01:08:49,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T01:08:49,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir in system properties and HBase conf 2024-11-19T01:08:49,848 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T01:08:49,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T01:08:49,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T01:08:49,848 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T01:08:49,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:08:49,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:08:49,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T01:08:49,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:08:49,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T01:08:49,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T01:08:49,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:08:49,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:08:49,850 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T01:08:49,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/nfs.dump.dir in system properties and HBase conf 2024-11-19T01:08:49,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/java.io.tmpdir in system properties and HBase conf 2024-11-19T01:08:49,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:08:49,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T01:08:49,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T01:08:49,865 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:08:49,979 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:49,985 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:08:49,990 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:08:49,990 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:08:49,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:08:49,991 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:49,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:08:49,993 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:08:50,112 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1edca743{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/java.io.tmpdir/jetty-localhost-42105-hadoop-hdfs-3_4_1-tests_jar-_-any-3943908873664981372/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:08:50,113 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:42105} 2024-11-19T01:08:50,113 INFO [Time-limited test {}] server.Server(415): Started @101409ms 2024-11-19T01:08:50,128 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:08:50,211 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:50,216 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:08:50,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:08:50,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:08:50,217 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:08:50,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:08:50,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:08:50,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a15ed6a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/java.io.tmpdir/jetty-localhost-38537-hadoop-hdfs-3_4_1-tests_jar-_-any-14264366492082346012/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:50,350 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:38537} 2024-11-19T01:08:50,351 INFO [Time-limited test {}] server.Server(415): Started @101647ms 2024-11-19T01:08:50,353 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:08:50,408 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:50,412 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:08:50,413 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:08:50,413 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:08:50,413 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:08:50,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:08:50,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:08:50,483 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data1/current/BP-1849944892-172.17.0.2-1731978529884/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:50,483 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data2/current/BP-1849944892-172.17.0.2-1731978529884/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:50,506 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:08:50,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6cda7d1035eb1661 with lease ID 0xabfbedb8a9c09c69: Processing first storage report for DS-944713e9-ebb7-491e-810c-bd6f8bfe885e from datanode DatanodeRegistration(127.0.0.1:45809, datanodeUuid=12bb7869-1b14-4df0-8ef4-77bf64e93432, infoPort=43769, infoSecurePort=0, ipcPort=43943, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884) 2024-11-19T01:08:50,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cda7d1035eb1661 with lease ID 0xabfbedb8a9c09c69: from storage DS-944713e9-ebb7-491e-810c-bd6f8bfe885e node DatanodeRegistration(127.0.0.1:45809, datanodeUuid=12bb7869-1b14-4df0-8ef4-77bf64e93432, infoPort=43769, infoSecurePort=0, ipcPort=43943, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:08:50,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6cda7d1035eb1661 with lease ID 0xabfbedb8a9c09c69: Processing first storage report for DS-4983e0ba-7e7c-45f9-b392-873bd914e912 from datanode DatanodeRegistration(127.0.0.1:45809, datanodeUuid=12bb7869-1b14-4df0-8ef4-77bf64e93432, infoPort=43769, infoSecurePort=0, ipcPort=43943, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884) 2024-11-19T01:08:50,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cda7d1035eb1661 with lease ID 0xabfbedb8a9c09c69: from storage DS-4983e0ba-7e7c-45f9-b392-873bd914e912 node DatanodeRegistration(127.0.0.1:45809, datanodeUuid=12bb7869-1b14-4df0-8ef4-77bf64e93432, infoPort=43769, infoSecurePort=0, ipcPort=43943, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:08:50,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18492d7d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/java.io.tmpdir/jetty-localhost-33937-hadoop-hdfs-3_4_1-tests_jar-_-any-6352183900613270355/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:50,537 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:33937} 2024-11-19T01:08:50,537 INFO [Time-limited test {}] server.Server(415): Started @101833ms 2024-11-19T01:08:50,539 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
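The datanode registrations and block reports above come from the in-process mini HDFS cluster that HBaseTestingUtil manages for this test. As a rough, hedged sketch (not taken from this run; class and method names follow the HBaseTestingUtil API referenced in the entries above and may differ between HBase versions), a test typically brings the same stack up and tears it down like this:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Starts an in-process DFS cluster, ZooKeeper quorum, one master and one
        // region server; their startup produces log output like the entries above.
        util.startMiniCluster();
        try {
          // ... exercise the cluster via util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }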
2024-11-19T01:08:50,623 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data3/current/BP-1849944892-172.17.0.2-1731978529884/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:50,623 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data4/current/BP-1849944892-172.17.0.2-1731978529884/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:50,650 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:08:50,653 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b3f8e0328f2c9a2 with lease ID 0xabfbedb8a9c09c6a: Processing first storage report for DS-84f9faff-1bce-4839-89bc-225b0285bbbb from datanode DatanodeRegistration(127.0.0.1:40715, datanodeUuid=0f49fc78-6bb7-4826-bf40-e3f9064b1f4a, infoPort=36831, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884) 2024-11-19T01:08:50,653 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b3f8e0328f2c9a2 with lease ID 0xabfbedb8a9c09c6a: from storage DS-84f9faff-1bce-4839-89bc-225b0285bbbb node DatanodeRegistration(127.0.0.1:40715, datanodeUuid=0f49fc78-6bb7-4826-bf40-e3f9064b1f4a, infoPort=36831, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:08:50,653 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b3f8e0328f2c9a2 with lease ID 0xabfbedb8a9c09c6a: Processing first storage report for DS-9ebdc4b1-4194-47e2-a40f-3b859d70159e from datanode DatanodeRegistration(127.0.0.1:40715, datanodeUuid=0f49fc78-6bb7-4826-bf40-e3f9064b1f4a, infoPort=36831, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884) 2024-11-19T01:08:50,653 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b3f8e0328f2c9a2 with lease ID 0xabfbedb8a9c09c6a: from storage DS-9ebdc4b1-4194-47e2-a40f-3b859d70159e node DatanodeRegistration(127.0.0.1:40715, datanodeUuid=0f49fc78-6bb7-4826-bf40-e3f9064b1f4a, infoPort=36831, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=1208284546;c=1731978529884), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:08:50,669 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37 2024-11-19T01:08:50,672 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/zookeeper_0, clientPort=49840, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T01:08:50,673 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49840 2024-11-19T01:08:50,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:50,675 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:50,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:08:50,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:08:50,690 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa with version=8 2024-11-19T01:08:50,690 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase-staging 2024-11-19T01:08:50,692 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:08:50,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:50,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:50,693 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:08:50,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:50,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:08:50,693 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T01:08:50,693 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:08:50,694 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39885 2024-11-19T01:08:50,695 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39885 connecting to ZooKeeper ensemble=127.0.0.1:49840 2024-11-19T01:08:50,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:398850x0, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:08:50,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39885-0x101088a04190000 connected 2024-11-19T01:08:50,722 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:50,724 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:50,727 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:50,727 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa, hbase.cluster.distributed=false 2024-11-19T01:08:50,729 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:08:50,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39885 2024-11-19T01:08:50,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39885 2024-11-19T01:08:50,734 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39885 2024-11-19T01:08:50,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39885 2024-11-19T01:08:50,737 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39885 2024-11-19T01:08:50,754 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:08:50,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:50,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:50,755 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:08:50,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:50,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:08:50,755 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:08:50,755 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:08:50,756 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40979 2024-11-19T01:08:50,757 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40979 connecting to ZooKeeper ensemble=127.0.0.1:49840 2024-11-19T01:08:50,758 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:50,760 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:50,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:409790x0, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:08:50,767 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40979-0x101088a04190001 connected 2024-11-19T01:08:50,767 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:50,767 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:08:50,770 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:08:50,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T01:08:50,771 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:08:50,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40979 2024-11-19T01:08:50,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40979 2024-11-19T01:08:50,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40979 2024-11-19T01:08:50,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40979 2024-11-19T01:08:50,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40979 
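The handler counts and call-queue depths logged for the master (port 39885) and region server (port 40979) RPC executors appear to be configuration-driven; handlerCount=3 with maxQueueLength=30 is consistent with a low handler count set in the test's own configuration. As a hedged sketch of the standard knobs (the values below are arbitrary examples, not the ones used by this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Number of RPC handler threads and maximum per-queue depth; the small values
        // seen above (handlerCount=3, maxQueueLength=30) come from the test config.
        conf.setInt("hbase.regionserver.handler.count", 30);
        conf.setInt("hbase.ipc.server.max.callqueue.length", 300);
        System.out.println(conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }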
2024-11-19T01:08:50,796 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5134ffc85563:39885 2024-11-19T01:08:50,797 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5134ffc85563,39885,1731978530692 2024-11-19T01:08:50,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:50,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:50,799 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5134ffc85563,39885,1731978530692 2024-11-19T01:08:50,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T01:08:50,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,802 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:08:50,802 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5134ffc85563,39885,1731978530692 from backup master directory 2024-11-19T01:08:50,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5134ffc85563,39885,1731978530692 2024-11-19T01:08:50,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:50,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:50,804 WARN [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
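The backup-masters znode creation, the subsequent NodeDeleted event, and the switch to active master above follow an ephemeral-znode registration pattern. A hedged, heavily simplified illustration with the plain ZooKeeper client is shown below (paths, port, and host string are copied or adapted from the log for illustration only; the real code lives in HBase's ActiveMasterManager/ZKUtil and also handles parent-node creation, retries, and watches):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistrationSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49840", 30000, event -> {});
        // An ephemeral znode disappears automatically when the creating session dies,
        // which is what lets watchers observe failover as NodeDeleted events.
        zk.create("/hbase/backup-masters/example-host,39885,0", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        zk.close();
      }
    }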
2024-11-19T01:08:50,804 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5134ffc85563,39885,1731978530692 2024-11-19T01:08:50,810 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/hbase.id] with ID: 31c77c0a-91ca-4dfa-b2e2-994f439768d7 2024-11-19T01:08:50,810 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/.tmp/hbase.id 2024-11-19T01:08:50,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:08:50,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:08:50,821 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/.tmp/hbase.id]:[hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/hbase.id] 2024-11-19T01:08:50,839 INFO [master/5134ffc85563:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:50,839 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T01:08:50,841 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
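The two FSUtils entries above write the cluster ID to a .tmp location and then move it into place. A hedged sketch of that write-then-rename idiom with the Hadoop FileSystem API (the path layout and the cluster ID string are copied from the log purely for illustration):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
        Path dst = new Path("/user/jenkins/test-data/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("31c77c0a-91ca-4dfa-b2e2-994f439768d7".getBytes(StandardCharsets.UTF_8));
        }
        // The rename publishes the file in one step, so a reader of hbase.id never
        // observes a partially written cluster ID.
        fs.rename(tmp, dst);
      }
    }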
2024-11-19T01:08:50,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:08:50,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:08:50,854 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:08:50,855 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T01:08:50,856 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:08:50,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:08:50,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:08:50,868 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store 2024-11-19T01:08:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:08:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:08:50,877 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:50,877 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:08:50,877 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:50,877 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:50,877 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:08:50,877 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:50,877 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
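The long descriptor dump above (column families info, proc, rs, and state of the local 'master:store' region) matches what the public builder API produces. A hedged sketch of just the 'info' family, with settings copied from the logged descriptor; the remaining families would be added the same way:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family from the descriptor logged above:
        // VERSIONS=3, IN_MEMORY=true, BLOCKSIZE=8KB, ROW_INDEX_V1 encoding, ROWCOL bloom.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .build();
        System.out.println(td);
      }
    }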
2024-11-19T01:08:50,878 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978530877Disabling compacts and flushes for region at 1731978530877Disabling writes for close at 1731978530877Writing region close event to WAL at 1731978530877Closed at 1731978530877 2024-11-19T01:08:50,879 WARN [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/.initializing 2024-11-19T01:08:50,879 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/WALs/5134ffc85563,39885,1731978530692 2024-11-19T01:08:50,883 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C39885%2C1731978530692, suffix=, logDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/WALs/5134ffc85563,39885,1731978530692, archiveDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/oldWALs, maxLogs=10 2024-11-19T01:08:50,883 INFO [master/5134ffc85563:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C39885%2C1731978530692.1731978530883 2024-11-19T01:08:50,889 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/WALs/5134ffc85563,39885,1731978530692/5134ffc85563%2C39885%2C1731978530692.1731978530883 2024-11-19T01:08:50,894 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36831:36831),(127.0.0.1/127.0.0.1:43769:43769)] 2024-11-19T01:08:50,895 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:08:50,895 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:50,895 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,895 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,898 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T01:08:50,898 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:50,899 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:50,899 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T01:08:50,901 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:50,901 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:08:50,901 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T01:08:50,903 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:50,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:08:50,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T01:08:50,906 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:50,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:08:50,906 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,907 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,908 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,910 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,910 DEBUG [master/5134ffc85563:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,910 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T01:08:50,912 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:50,918 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:08:50,918 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873185, jitterRate=0.11031275987625122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T01:08:50,919 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731978530895Initializing all the Stores at 1731978530896 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978530896Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978530896Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978530897 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978530897Cleaning up temporary data from old regions at 1731978530910 (+13 ms)Region opened successfully at 1731978530919 (+9 ms) 2024-11-19T01:08:50,921 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T01:08:50,926 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a8cfdcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:08:50,927 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T01:08:50,927 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T01:08:50,927 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T01:08:50,927 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T01:08:50,928 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T01:08:50,928 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T01:08:50,928 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T01:08:50,931 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T01:08:50,931 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T01:08:50,933 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T01:08:50,933 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T01:08:50,934 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T01:08:50,935 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T01:08:50,935 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T01:08:50,936 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T01:08:50,937 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T01:08:50,939 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T01:08:50,940 DEBUG 
[master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T01:08:50,942 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T01:08:50,944 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T01:08:50,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:50,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:50,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,946 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5134ffc85563,39885,1731978530692, sessionid=0x101088a04190000, setting cluster-up flag (Was=false) 2024-11-19T01:08:50,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,954 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T01:08:50,955 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,39885,1731978530692 2024-11-19T01:08:50,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:50,963 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T01:08:50,964 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,39885,1731978530692 2024-11-19T01:08:50,965 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T01:08:50,967 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:50,967 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T01:08:50,967 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T01:08:50,968 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5134ffc85563,39885,1731978530692 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5134ffc85563:0, corePoolSize=10, maxPoolSize=10 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:08:50,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T01:08:50,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731978560970 2024-11-19T01:08:50,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T01:08:50,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T01:08:50,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T01:08:50,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T01:08:50,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T01:08:50,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T01:08:50,971 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:50,971 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T01:08:50,971 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:50,971 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T01:08:50,971 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T01:08:50,971 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T01:08:50,972 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T01:08:50,972 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T01:08:50,972 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978530972,5,FailOnTimeoutGroup] 2024-11-19T01:08:50,972 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978530972,5,FailOnTimeoutGroup] 2024-11-19T01:08:50,972 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:50,972 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T01:08:50,972 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:50,972 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:50,973 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:50,973 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T01:08:50,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:08:50,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:08:50,981 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T01:08:50,982 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa 2024-11-19T01:08:50,984 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(746): ClusterId : 31c77c0a-91ca-4dfa-b2e2-994f439768d7 2024-11-19T01:08:50,984 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:08:50,986 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:08:50,986 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T01:08:50,989 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:08:50,989 DEBUG [RS:0;5134ffc85563:40979 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f0148c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:08:50,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:08:50,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:08:51,002 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5134ffc85563:40979 2024-11-19T01:08:51,002 INFO [RS:0;5134ffc85563:40979 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:08:51,002 INFO [RS:0;5134ffc85563:40979 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:08:51,002 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(832): About to register with Master. 
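[Editorial note] The table descriptor logged above for hbase:meta (families info, ns, rep_barrier, table with ROWCOL bloom filters, ROW_INDEX_V1 encoding, IN_MEMORY and small block sizes) can be mirrored with the public descriptor-builder API. The sketch below is only an illustration of that API against a hypothetical table named "demo"; it is not how HBase itself bootstraps hbase:meta, and only one family is shown.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Hypothetical table; the descriptor in the log belongs to hbase:meta.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            // One family mirroring the logged 'info' attributes.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8192)                                  // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
        System.out.println(td);
      }
    }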
2024-11-19T01:08:51,003 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,39885,1731978530692 with port=40979, startcode=1731978530754 2024-11-19T01:08:51,003 DEBUG [RS:0;5134ffc85563:40979 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:08:51,006 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57359, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:08:51,007 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39885 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,007 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39885 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,009 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa 2024-11-19T01:08:51,009 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39165 2024-11-19T01:08:51,009 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:08:51,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:08:51,011 DEBUG [RS:0;5134ffc85563:40979 {}] zookeeper.ZKUtil(111): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,011 WARN [RS:0;5134ffc85563:40979 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T01:08:51,011 INFO [RS:0;5134ffc85563:40979 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:08:51,011 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/WALs/5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,40979,1731978530754] 2024-11-19T01:08:51,015 INFO [RS:0;5134ffc85563:40979 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:08:51,018 INFO [RS:0;5134ffc85563:40979 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:08:51,019 INFO [RS:0;5134ffc85563:40979 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:08:51,019 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
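[Editorial note] The MemStoreFlusher and PressureAwareCompactionThroughputController lines above report derived limits (globalMemStoreLimit=880 M, compaction throughput bounds of 100/50 MB/s). A minimal sketch of how such limits are typically tuned is shown below; the configuration key names are assumptions based on common HBase settings, not values taken from this log, and the numbers are the defaults the logged figures appear to reflect.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region-server heap usable by all memstores; the 880 M /
        // 836 M figures in the log are this limit and its 0.95 lower mark for a test heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Bounds (bytes/sec) believed to back the logged compaction throughput controller.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }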
2024-11-19T01:08:51,019 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:08:51,020 INFO [RS:0;5134ffc85563:40979 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:08:51,020 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,020 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,020 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,020 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:08:51,021 DEBUG [RS:0;5134ffc85563:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:08:51,022 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
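[Editorial note] Many of the following entries come from ChoreService scheduling ScheduledChore instances (CompactionChecker, MemstoreFlusherChore, etc.). The sketch below shows, under the assumption that the public ScheduledChore/ChoreService constructors behave as in current HBase releases, how such a periodic chore is defined and scheduled; the names "demo" and "DemoChore" are hypothetical.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // Runs every 1000 ms, like the CompactionChecker chore in the log above.
        ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        service.scheduleChore(chore);
        Thread.sleep(3000);
        service.shutdown();
      }
    }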
2024-11-19T01:08:51,022 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,022 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,022 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,022 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,022 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,40979,1731978530754-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:08:51,041 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:08:51,041 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,40979,1731978530754-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,041 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,041 INFO [RS:0;5134ffc85563:40979 {}] regionserver.Replication(171): 5134ffc85563,40979,1731978530754 started 2024-11-19T01:08:51,057 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,057 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,40979,1731978530754, RpcServer on 5134ffc85563/172.17.0.2:40979, sessionid=0x101088a04190001 2024-11-19T01:08:51,057 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:08:51,057 DEBUG [RS:0;5134ffc85563:40979 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,057 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,40979,1731978530754' 2024-11-19T01:08:51,057 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:08:51,058 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:08:51,058 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:08:51,058 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:08:51,058 DEBUG [RS:0;5134ffc85563:40979 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,058 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,40979,1731978530754' 2024-11-19T01:08:51,058 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:08:51,059 DEBUG 
[RS:0;5134ffc85563:40979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:08:51,059 DEBUG [RS:0;5134ffc85563:40979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:08:51,059 INFO [RS:0;5134ffc85563:40979 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:08:51,059 INFO [RS:0;5134ffc85563:40979 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T01:08:51,162 INFO [RS:0;5134ffc85563:40979 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C40979%2C1731978530754, suffix=, logDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/WALs/5134ffc85563,40979,1731978530754, archiveDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/oldWALs, maxLogs=32 2024-11-19T01:08:51,164 INFO [RS:0;5134ffc85563:40979 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C40979%2C1731978530754.1731978531164 2024-11-19T01:08:51,171 INFO [RS:0;5134ffc85563:40979 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/WALs/5134ffc85563,40979,1731978530754/5134ffc85563%2C40979%2C1731978530754.1731978531164 2024-11-19T01:08:51,178 DEBUG [RS:0;5134ffc85563:40979 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43769:43769),(127.0.0.1/127.0.0.1:36831:36831)] 2024-11-19T01:08:51,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:51,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:08:51,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:08:51,395 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:51,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:08:51,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:08:51,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:51,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:08:51,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:08:51,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:51,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:08:51,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:08:51,402 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:51,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:08:51,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740 2024-11-19T01:08:51,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740 2024-11-19T01:08:51,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:08:51,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:08:51,406 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
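[Editorial note] Two tunables surface in the entries above: the WAL settings ("blocksize=256 MB, rollsize=128 MB, maxLogs=32") and the per-column-family flush threshold that FlushLargeStoresPolicy falls back from when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. The sketch below sets both; only the flush-policy key is named verbatim in the log, while the WAL-related keys are assumptions about the properties behind those numbers (rollsize being blocksize times the roll multiplier).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalAndFlushPolicySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Named in the FlushLargeStoresPolicy message: when unset, the per-family
        // threshold falls back to memstore flush size divided by the family count.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        // Assumed keys behind "blocksize=256 MB, rollsize=128 MB, maxLogs=32".
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.regionserver.maxlogs"));
      }
    }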
2024-11-19T01:08:51,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:08:51,410 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:08:51,411 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792256, jitterRate=0.007405951619148254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:08:51,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731978531391Initializing all the Stores at 1731978531393 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978531393Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978531393Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978531393Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978531393Cleaning up temporary data from old regions at 1731978531405 (+12 ms)Region opened successfully at 1731978531412 (+7 ms) 2024-11-19T01:08:51,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:08:51,413 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:08:51,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:08:51,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:08:51,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:08:51,414 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:08:51,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978531413Disabling compacts and flushes for region at 1731978531413Disabling writes for close at 1731978531413Writing region 
close event to WAL at 1731978531413Closed at 1731978531413 2024-11-19T01:08:51,416 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:51,416 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T01:08:51,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T01:08:51,419 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:08:51,420 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T01:08:51,571 DEBUG [5134ffc85563:39885 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T01:08:51,572 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,574 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,40979,1731978530754, state=OPENING 2024-11-19T01:08:51,576 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T01:08:51,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:51,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:51,580 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:08:51,580 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:51,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,40979,1731978530754}] 2024-11-19T01:08:51,580 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:51,735 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T01:08:51,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49525, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T01:08:51,745 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T01:08:51,745 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:08:51,748 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C40979%2C1731978530754.meta, suffix=.meta, logDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/WALs/5134ffc85563,40979,1731978530754, archiveDir=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/oldWALs, maxLogs=32 2024-11-19T01:08:51,751 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C40979%2C1731978530754.meta.1731978531751.meta 2024-11-19T01:08:51,765 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/WALs/5134ffc85563,40979,1731978530754/5134ffc85563%2C40979%2C1731978530754.meta.1731978531751.meta 2024-11-19T01:08:51,773 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36831:36831),(127.0.0.1/127.0.0.1:43769:43769)] 2024-11-19T01:08:51,776 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:08:51,777 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T01:08:51,777 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T01:08:51,777 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
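[Editorial note] The coprocessor load above (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint, priority 536870911) comes from the coprocessor$1 attribute in the hbase:meta descriptor. For a user table, the same endpoint class can be declared through the descriptor builder, as sketched below; the table name "demo" and family "info" are hypothetical, and the endpoint class name is the one shown in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CoprocessorAttachSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
            // Same endpoint class the meta descriptor declares in the log above.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td.getCoprocessorDescriptors());
      }
    }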
2024-11-19T01:08:51,777 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T01:08:51,777 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:51,777 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T01:08:51,777 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T01:08:51,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:08:51,783 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:08:51,783 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:51,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:08:51,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:08:51,786 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:51,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:08:51,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:08:51,788 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:51,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:08:51,791 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:08:51,791 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:51,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
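[Editorial note] The repeated CompactionConfiguration lines report the effective compaction policy per store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). A minimal sketch of the configuration keys believed to control those values follows; the key names are assumptions rather than something printed in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }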
2024-11-19T01:08:51,792 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:08:51,793 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740 2024-11-19T01:08:51,795 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740 2024-11-19T01:08:51,797 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:08:51,797 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:08:51,798 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:08:51,801 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:08:51,803 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796967, jitterRate=0.013396039605140686}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:08:51,803 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T01:08:51,805 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731978531778Writing region info on filesystem at 1731978531778Initializing all the Stores at 1731978531779 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978531779Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978531781 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978531781Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978531781Cleaning up temporary data from old regions at 1731978531797 (+16 ms)Running coprocessor post-open hooks at 1731978531803 (+6 ms)Region opened successfully at 1731978531805 (+2 ms) 2024-11-19T01:08:51,809 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731978531735 2024-11-19T01:08:51,820 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T01:08:51,820 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T01:08:51,821 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,823 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,40979,1731978530754, state=OPEN 2024-11-19T01:08:51,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:08:51,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:08:51,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:51,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:51,828 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T01:08:51,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,40979,1731978530754 in 249 msec 2024-11-19T01:08:51,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T01:08:51,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 418 msec 2024-11-19T01:08:51,838 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:51,839 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T01:08:51,843 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:08:51,843 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,40979,1731978530754, seqNum=-1] 2024-11-19T01:08:51,843 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:08:51,845 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39787, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:08:51,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 889 msec 2024-11-19T01:08:51,857 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731978531857, completionTime=-1 2024-11-19T01:08:51,857 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T01:08:51,857 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T01:08:51,860 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T01:08:51,861 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731978591861 2024-11-19T01:08:51,861 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731978651861 2024-11-19T01:08:51,861 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-11-19T01:08:51,861 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39885,1731978530692-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,861 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39885,1731978530692-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,861 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39885,1731978530692-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,862 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5134ffc85563:39885, period=300000, unit=MILLISECONDS is enabled. 
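[Editorial note] InitMetaProcedure above creates the {NAME => 'default'} and {NAME => 'hbase'} namespaces as its final step. Once the cluster is up, those namespaces are visible through the Admin API, as in the sketch below; the connection assumes an hbase-site.xml on the classpath pointing at this cluster, and "demo_ns" is a hypothetical extra namespace.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The two namespaces InitMetaProcedure creates at bootstrap show up here.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());   // expect "default" and "hbase"
          }
          // Creating an additional, hypothetical namespace.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
      }
    }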
2024-11-19T01:08:51,862 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,862 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:51,866 DEBUG [master/5134ffc85563:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.065sec 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39885,1731978530692-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:08:51,869 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39885,1731978530692-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T01:08:51,873 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T01:08:51,873 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T01:08:51,873 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39885,1731978530692-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
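The "Chore ScheduledChore name=..., period=..., unit=... is enabled" records above are the master registering its periodic background tasks with HBase's ChoreService. Purely as an illustration of that pattern (this is not code from the test; it is a minimal sketch against the public org.apache.hadoop.hbase.ScheduledChore and ChoreService APIs, and the exact constructor overloads may differ between HBase versions), a standalone chore can be scheduled like this:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleChore {
  // Minimal Stoppable so the chore can be cancelled; stands in for the master/regionserver stopper.
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws InterruptedException {
    SimpleStopper stopper = new SimpleStopper();
    // Period is in milliseconds here; the BalancerChore above uses period=300000 the same way.
    ScheduledChore chore = new ScheduledChore("example-chore", stopper, 5000) {
      @Override
      protected void chore() {
        System.out.println("chore tick");
      }
    };
    ChoreService service = new ChoreService("example");  // prefix for the scheduler's worker threads
    service.scheduleChore(chore);                        // logs a "Chore ... is enabled." message like those above
    Thread.sleep(12_000);
    stopper.stop("done");
    service.shutdown();
  }
}

Each enabled chore in the log (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) is an instance of this same ScheduledChore/ChoreService arrangement, differing only in name and period.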
2024-11-19T01:08:51,885 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@210f4ee9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:08:51,885 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5134ffc85563,39885,-1 for getting cluster id 2024-11-19T01:08:51,885 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T01:08:51,890 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '31c77c0a-91ca-4dfa-b2e2-994f439768d7' 2024-11-19T01:08:51,899 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T01:08:51,899 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "31c77c0a-91ca-4dfa-b2e2-994f439768d7" 2024-11-19T01:08:51,902 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@647d7b64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:08:51,902 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5134ffc85563,39885,-1] 2024-11-19T01:08:51,902 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T01:08:51,903 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:51,906 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48550, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T01:08:51,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf88cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:08:51,911 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:08:51,912 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,40979,1731978530754, seqNum=-1] 2024-11-19T01:08:51,913 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:08:51,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:08:51,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5134ffc85563,39885,1731978530692 2024-11-19T01:08:51,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:51,923 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T01:08:51,923 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T01:08:51,923 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T01:08:51,924 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:08:51,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:51,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:51,924 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T01:08:51,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T01:08:51,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1644424511, stopped=false 2024-11-19T01:08:51,925 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5134ffc85563,39885,1731978530692 2024-11-19T01:08:51,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:51,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:51,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:51,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:51,927 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:08:51,927 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T01:08:51,927 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:08:51,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:51,928 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,40979,1731978530754' ***** 2024-11-19T01:08:51,928 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:08:51,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:51,928 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:08:51,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:51,928 INFO [RS:0;5134ffc85563:40979 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:08:51,928 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:08:51,928 INFO [RS:0;5134ffc85563:40979 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:08:51,928 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,40979,1731978530754 2024-11-19T01:08:51,928 INFO [RS:0;5134ffc85563:40979 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:08:51,928 INFO [RS:0;5134ffc85563:40979 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5134ffc85563:40979. 2024-11-19T01:08:51,929 DEBUG [RS:0;5134ffc85563:40979 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:08:51,929 DEBUG [RS:0;5134ffc85563:40979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:51,929 INFO [RS:0;5134ffc85563:40979 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-19T01:08:51,929 INFO [RS:0;5134ffc85563:40979 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:08:51,929 INFO [RS:0;5134ffc85563:40979 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T01:08:51,929 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T01:08:51,930 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T01:08:51,930 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T01:08:51,930 DEBUG [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T01:08:51,931 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:08:51,931 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:08:51,931 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:08:51,931 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:08:51,931 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:08:51,931 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T01:08:51,956 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740/.tmp/ns/4c5fcc296cce4a67988b7ea617accb91 is 43, key is default/ns:d/1731978531846/Put/seqid=0 2024-11-19T01:08:51,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741835_1011 (size=5153) 2024-11-19T01:08:51,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741835_1011 (size=5153) 2024-11-19T01:08:51,971 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740/.tmp/ns/4c5fcc296cce4a67988b7ea617accb91 2024-11-19T01:08:51,984 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740/.tmp/ns/4c5fcc296cce4a67988b7ea617accb91 as hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740/ns/4c5fcc296cce4a67988b7ea617accb91 2024-11-19T01:08:51,995 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740/ns/4c5fcc296cce4a67988b7ea617accb91, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T01:08:51,997 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 66ms, sequenceid=6, compaction requested=false 2024-11-19T01:08:51,997 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T01:08:52,003 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T01:08:52,004 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:08:52,004 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:08:52,004 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978531931Running coprocessor pre-close hooks at 1731978531931Disabling compacts and flushes for region at 1731978531931Disabling writes for close at 1731978531931Obtaining lock to block concurrent updates at 1731978531931Preparing flush snapshotting stores in 1588230740 at 1731978531931Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731978531932 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731978531933 (+1 ms)Flushing 1588230740/ns: creating writer at 1731978531933Flushing 1588230740/ns: appending metadata at 1731978531956 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1731978531956Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d6efe76: reopening flushed file at 1731978531983 (+27 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 66ms, sequenceid=6, compaction requested=false at 1731978531997 (+14 ms)Writing region close event to WAL at 1731978531999 (+2 ms)Running coprocessor post-close hooks at 1731978532004 (+5 ms)Closed at 1731978532004 2024-11-19T01:08:52,004 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T01:08:52,035 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T01:08:52,035 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T01:08:52,131 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,40979,1731978530754; all regions closed. 
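The shutdown that begins with "Shutting down minicluster" above is driven from the test itself: the call stacks recorded a few records earlier show AbstractTestLogRolling.tearDown() calling HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and then stops the region server and master. A minimal sketch of that JUnit pattern follows; only HBaseTestingUtil and shutdownMiniCluster appear literally in the stack traces, while startMiniCluster, the class and field names, and the exact signatures are assumptions based on the standard HBase test-utility API:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleMiniClusterTest {
  // One harness instance per test; the log's "Time-limited test" thread drives an object like this.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up ZooKeeper, HDFS and HBase in-process ("Starting up minicluster with option: ...").
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Triggers the sequence recorded above: close the connection, stop the region server, stop the master.
    testUtil.shutdownMiniCluster();
  }
}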
2024-11-19T01:08:52,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,132 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,132 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,132 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,132 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741834_1010 (size=1152) 2024-11-19T01:08:52,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741834_1010 (size=1152) 2024-11-19T01:08:52,138 DEBUG [RS:0;5134ffc85563:40979 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/oldWALs 2024-11-19T01:08:52,138 INFO [RS:0;5134ffc85563:40979 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C40979%2C1731978530754.meta:.meta(num 1731978531751) 2024-11-19T01:08:52,138 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,139 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,139 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,139 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741833_1009 (size=93) 2024-11-19T01:08:52,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741833_1009 (size=93) 2024-11-19T01:08:52,144 DEBUG [RS:0;5134ffc85563:40979 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/oldWALs 2024-11-19T01:08:52,144 INFO [RS:0;5134ffc85563:40979 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C40979%2C1731978530754:(num 1731978531164) 2024-11-19T01:08:52,144 DEBUG [RS:0;5134ffc85563:40979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:52,144 INFO [RS:0;5134ffc85563:40979 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:08:52,144 INFO [RS:0;5134ffc85563:40979 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:08:52,145 INFO [RS:0;5134ffc85563:40979 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T01:08:52,145 INFO [RS:0;5134ffc85563:40979 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:08:52,145 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:08:52,145 INFO [RS:0;5134ffc85563:40979 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40979 2024-11-19T01:08:52,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,40979,1731978530754 2024-11-19T01:08:52,147 INFO [RS:0;5134ffc85563:40979 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:08:52,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:08:52,148 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,40979,1731978530754] 2024-11-19T01:08:52,150 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,40979,1731978530754 already deleted, retry=false 2024-11-19T01:08:52,150 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,40979,1731978530754 expired; onlineServers=0 2024-11-19T01:08:52,150 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5134ffc85563,39885,1731978530692' ***** 2024-11-19T01:08:52,150 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T01:08:52,151 INFO [M:0;5134ffc85563:39885 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:08:52,151 INFO [M:0;5134ffc85563:39885 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:08:52,151 DEBUG [M:0;5134ffc85563:39885 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T01:08:52,151 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-19T01:08:52,151 DEBUG [M:0;5134ffc85563:39885 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T01:08:52,151 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978530972 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978530972,5,FailOnTimeoutGroup] 2024-11-19T01:08:52,151 INFO [M:0;5134ffc85563:39885 {}] hbase.ChoreService(370): Chore service for: master/5134ffc85563:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T01:08:52,151 INFO [M:0;5134ffc85563:39885 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:08:52,151 DEBUG [M:0;5134ffc85563:39885 {}] master.HMaster(1795): Stopping service threads 2024-11-19T01:08:52,151 INFO [M:0;5134ffc85563:39885 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T01:08:52,151 INFO [M:0;5134ffc85563:39885 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:08:52,152 INFO [M:0;5134ffc85563:39885 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T01:08:52,152 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-19T01:08:52,152 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978530972 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978530972,5,FailOnTimeoutGroup] 2024-11-19T01:08:52,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T01:08:52,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:52,153 DEBUG [M:0;5134ffc85563:39885 {}] zookeeper.ZKUtil(347): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T01:08:52,153 WARN [M:0;5134ffc85563:39885 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T01:08:52,153 INFO [M:0;5134ffc85563:39885 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/.lastflushedseqids 2024-11-19T01:08:52,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741836_1012 (size=99) 2024-11-19T01:08:52,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741836_1012 (size=99) 2024-11-19T01:08:52,166 INFO [M:0;5134ffc85563:39885 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T01:08:52,166 INFO [M:0;5134ffc85563:39885 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T01:08:52,166 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:08:52,166 INFO [M:0;5134ffc85563:39885 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:52,166 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:52,166 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:08:52,167 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:08:52,167 INFO [M:0;5134ffc85563:39885 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T01:08:52,186 DEBUG [M:0;5134ffc85563:39885 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8394340324d845bbba52b0bb20604745 is 82, key is hbase:meta,,1/info:regioninfo/1731978531821/Put/seqid=0 2024-11-19T01:08:52,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741837_1013 (size=5672) 2024-11-19T01:08:52,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741837_1013 (size=5672) 2024-11-19T01:08:52,198 INFO [M:0;5134ffc85563:39885 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8394340324d845bbba52b0bb20604745 2024-11-19T01:08:52,239 DEBUG [M:0;5134ffc85563:39885 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2cd53232857f430081f7356b8ae64117 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731978531856/Put/seqid=0 2024-11-19T01:08:52,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741838_1014 (size=5275) 2024-11-19T01:08:52,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741838_1014 (size=5275) 2024-11-19T01:08:52,249 INFO [M:0;5134ffc85563:39885 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2cd53232857f430081f7356b8ae64117 2024-11-19T01:08:52,250 INFO [RS:0;5134ffc85563:40979 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:08:52,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:52,250 INFO [RS:0;5134ffc85563:40979 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,40979,1731978530754; zookeeper connection closed. 
2024-11-19T01:08:52,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x101088a04190001, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:52,250 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5b4e253b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5b4e253b 2024-11-19T01:08:52,250 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T01:08:52,281 DEBUG [M:0;5134ffc85563:39885 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ed2099fc85354a72a903db1482d8cd85 is 69, key is 5134ffc85563,40979,1731978530754/rs:state/1731978531007/Put/seqid=0 2024-11-19T01:08:52,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741839_1015 (size=5156) 2024-11-19T01:08:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741839_1015 (size=5156) 2024-11-19T01:08:52,292 INFO [M:0;5134ffc85563:39885 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ed2099fc85354a72a903db1482d8cd85 2024-11-19T01:08:52,319 DEBUG [M:0;5134ffc85563:39885 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1859a82050a94d82a002b5d69c9a0edc is 52, key is load_balancer_on/state:d/1731978531922/Put/seqid=0 2024-11-19T01:08:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741840_1016 (size=5056) 2024-11-19T01:08:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741840_1016 (size=5056) 2024-11-19T01:08:52,327 INFO [M:0;5134ffc85563:39885 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1859a82050a94d82a002b5d69c9a0edc 2024-11-19T01:08:52,334 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8394340324d845bbba52b0bb20604745 as hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8394340324d845bbba52b0bb20604745 2024-11-19T01:08:52,345 INFO [M:0;5134ffc85563:39885 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8394340324d845bbba52b0bb20604745, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-19T01:08:52,347 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2cd53232857f430081f7356b8ae64117 as hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2cd53232857f430081f7356b8ae64117 2024-11-19T01:08:52,354 INFO [M:0;5134ffc85563:39885 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2cd53232857f430081f7356b8ae64117, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T01:08:52,355 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ed2099fc85354a72a903db1482d8cd85 as hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ed2099fc85354a72a903db1482d8cd85 2024-11-19T01:08:52,364 INFO [M:0;5134ffc85563:39885 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ed2099fc85354a72a903db1482d8cd85, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T01:08:52,365 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1859a82050a94d82a002b5d69c9a0edc as hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1859a82050a94d82a002b5d69c9a0edc 2024-11-19T01:08:52,376 INFO [M:0;5134ffc85563:39885 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39165/user/jenkins/test-data/615e7781-add6-7b8e-34d1-4bfe1e4069aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1859a82050a94d82a002b5d69c9a0edc, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T01:08:52,378 INFO [M:0;5134ffc85563:39885 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 211ms, sequenceid=29, compaction requested=false 2024-11-19T01:08:52,380 INFO [M:0;5134ffc85563:39885 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:08:52,380 DEBUG [M:0;5134ffc85563:39885 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978532166Disabling compacts and flushes for region at 1731978532166Disabling writes for close at 1731978532166Obtaining lock to block concurrent updates at 1731978532167 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731978532167Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731978532167Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731978532168 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731978532168Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731978532185 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731978532185Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731978532207 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731978532238 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731978532238Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731978532256 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731978532280 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731978532280Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731978532302 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731978532319 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731978532319Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1dd1f6ce: reopening flushed file at 1731978532333 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f54e641: reopening flushed file at 1731978532346 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2262ff34: reopening flushed file at 1731978532354 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a36c73d: reopening flushed file at 1731978532364 (+10 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 211ms, sequenceid=29, compaction requested=false at 1731978532378 (+14 ms)Writing region close event to WAL at 1731978532380 (+2 ms)Closed at 1731978532380 2024-11-19T01:08:52,381 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,381 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,382 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,382 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:08:52,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45809 is added to blk_1073741830_1006 (size=10311) 2024-11-19T01:08:52,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40715 is added to blk_1073741830_1006 (size=10311) 2024-11-19T01:08:52,787 INFO [M:0;5134ffc85563:39885 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T01:08:52,787 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T01:08:52,787 INFO [M:0;5134ffc85563:39885 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39885 2024-11-19T01:08:52,787 INFO [M:0;5134ffc85563:39885 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:08:52,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:52,890 INFO [M:0;5134ffc85563:39885 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:08:52,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39885-0x101088a04190000, quorum=127.0.0.1:49840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:08:52,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18492d7d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:52,897 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:08:52,897 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:08:52,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:08:52,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir/,STOPPED} 2024-11-19T01:08:52,899 WARN [BP-1849944892-172.17.0.2-1731978529884 heartbeating to localhost/127.0.0.1:39165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:08:52,899 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:08:52,899 WARN [BP-1849944892-172.17.0.2-1731978529884 heartbeating to localhost/127.0.0.1:39165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1849944892-172.17.0.2-1731978529884 (Datanode Uuid 0f49fc78-6bb7-4826-bf40-e3f9064b1f4a) service to localhost/127.0.0.1:39165 2024-11-19T01:08:52,899 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:08:52,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data3/current/BP-1849944892-172.17.0.2-1731978529884 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:52,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data4/current/BP-1849944892-172.17.0.2-1731978529884 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:52,900 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:08:52,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a15ed6a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:52,903 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:08:52,904 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:08:52,904 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:08:52,904 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir/,STOPPED} 2024-11-19T01:08:52,906 WARN [BP-1849944892-172.17.0.2-1731978529884 heartbeating to localhost/127.0.0.1:39165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:08:52,906 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:08:52,906 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:08:52,906 WARN [BP-1849944892-172.17.0.2-1731978529884 heartbeating to localhost/127.0.0.1:39165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1849944892-172.17.0.2-1731978529884 (Datanode Uuid 12bb7869-1b14-4df0-8ef4-77bf64e93432) service to localhost/127.0.0.1:39165 2024-11-19T01:08:52,906 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data1/current/BP-1849944892-172.17.0.2-1731978529884 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:52,907 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/cluster_ad248f48-acec-2d2e-4287-076004853c7b/data/data2/current/BP-1849944892-172.17.0.2-1731978529884 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:08:52,907 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:08:52,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1edca743{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:08:52,916 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:08:52,916 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:08:52,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:08:52,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir/,STOPPED} 2024-11-19T01:08:52,925 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T01:08:52,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T01:08:52,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T01:08:52,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.log.dir so I do NOT create it in target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd 2024-11-19T01:08:52,951 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d4ab7969-8082-7c73-2b15-f6d60f7a5c37/hadoop.tmp.dir so I do NOT create it in target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd 2024-11-19T01:08:52,951 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73, deleteOnExit=true 2024-11-19T01:08:52,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T01:08:52,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/test.cache.data in system properties and HBase conf 2024-11-19T01:08:52,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T01:08:52,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir in system properties and HBase conf 2024-11-19T01:08:52,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T01:08:52,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T01:08:52,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T01:08:52,952 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T01:08:52,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:08:52,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:08:52,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T01:08:52,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:08:52,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/nfs.dump.dir in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:08:52,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T01:08:52,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T01:08:52,976 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:08:53,025 INFO [regionserver/5134ffc85563:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:08:53,048 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:53,053 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:08:53,055 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:08:53,055 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:08:53,055 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:08:53,056 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:53,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:08:53,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:08:53,186 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c00ef51{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir/jetty-localhost-33711-hadoop-hdfs-3_4_1-tests_jar-_-any-2300138050151648150/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:08:53,187 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:33711} 2024-11-19T01:08:53,187 INFO [Time-limited test {}] server.Server(415): Started @104483ms 2024-11-19T01:08:53,201 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:08:53,312 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:53,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:08:53,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:08:53,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:08:53,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:08:53,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:08:53,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:08:53,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de86657{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir/jetty-localhost-32987-hadoop-hdfs-3_4_1-tests_jar-_-any-2904252554024117291/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:53,438 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:32987} 2024-11-19T01:08:53,438 INFO [Time-limited test {}] server.Server(415): Started @104734ms 2024-11-19T01:08:53,440 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:08:53,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:08:53,499 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:08:53,500 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:08:53,500 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:08:53,500 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:08:53,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66182b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:08:53,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eee535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:08:53,556 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data1/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:53,556 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data2/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:53,582 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:08:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79556cde9cca6f7 with lease ID 0xb474c26cb170589a: Processing first storage report for DS-e17ca27b-d886-49fb-ae3d-4459f43db400 from datanode DatanodeRegistration(127.0.0.1:39479, datanodeUuid=b2f5f431-118e-4697-a012-633a30fcbc5b, infoPort=35905, infoSecurePort=0, ipcPort=40121, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:08:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79556cde9cca6f7 with lease ID 0xb474c26cb170589a: from storage DS-e17ca27b-d886-49fb-ae3d-4459f43db400 node DatanodeRegistration(127.0.0.1:39479, datanodeUuid=b2f5f431-118e-4697-a012-633a30fcbc5b, infoPort=35905, infoSecurePort=0, ipcPort=40121, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:08:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79556cde9cca6f7 with lease ID 0xb474c26cb170589a: Processing first storage report for DS-8b92dd6b-c1cb-4477-9e6f-c15f53681f03 from datanode DatanodeRegistration(127.0.0.1:39479, datanodeUuid=b2f5f431-118e-4697-a012-633a30fcbc5b, infoPort=35905, infoSecurePort=0, ipcPort=40121, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:08:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79556cde9cca6f7 with lease ID 0xb474c26cb170589a: from storage DS-8b92dd6b-c1cb-4477-9e6f-c15f53681f03 node DatanodeRegistration(127.0.0.1:39479, datanodeUuid=b2f5f431-118e-4697-a012-633a30fcbc5b, infoPort=35905, infoSecurePort=0, ipcPort=40121, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:08:53,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f2859b3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir/jetty-localhost-38881-hadoop-hdfs-3_4_1-tests_jar-_-any-2450423966467674206/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:08:53,672 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b918d2a{HTTP/1.1, (http/1.1)}{localhost:38881} 2024-11-19T01:08:53,672 INFO [Time-limited test {}] server.Server(415): Started @104968ms 2024-11-19T01:08:53,675 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
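The entries above show HBaseTestingUtil tearing down the previous minicluster and bringing up a fresh one with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, redirecting every Hadoop/HBase directory into the per-test data directory before starting DFS, the datanode web UIs and the embedded Jetty servers. A minimal sketch of how a test might request the same topology; the class and option names are taken from the log lines (HBaseTestingUtil, StartMiniClusterOption), everything else is illustrative and assumed:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Same shape as the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // produces the "Starting up minicluster ..." lines seen above
        try {
          // test body would go here
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen above
        }
      }
    }
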
2024-11-19T01:08:53,780 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data3/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:53,780 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data4/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:08:53,814 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:08:53,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x765133523d1d6c1c with lease ID 0xb474c26cb170589b: Processing first storage report for DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b from datanode DatanodeRegistration(127.0.0.1:41819, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=39515, infoSecurePort=0, ipcPort=42945, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:08:53,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x765133523d1d6c1c with lease ID 0xb474c26cb170589b: from storage DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b node DatanodeRegistration(127.0.0.1:41819, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=39515, infoSecurePort=0, ipcPort=42945, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T01:08:53,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x765133523d1d6c1c with lease ID 0xb474c26cb170589b: Processing first storage report for DS-572c03a4-0198-4757-b0ce-aacb3fc0872a from datanode DatanodeRegistration(127.0.0.1:41819, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=39515, infoSecurePort=0, ipcPort=42945, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:08:53,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x765133523d1d6c1c with lease ID 0xb474c26cb170589b: from storage DS-572c03a4-0198-4757-b0ce-aacb3fc0872a node DatanodeRegistration(127.0.0.1:41819, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=39515, infoSecurePort=0, ipcPort=42945, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:08:53,839 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd 2024-11-19T01:08:53,846 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/zookeeper_0, clientPort=61906, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T01:08:53,848 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61906 2024-11-19T01:08:53,848 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:53,850 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:53,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:08:53,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:08:53,876 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c with version=8 2024-11-19T01:08:53,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase-staging 2024-11-19T01:08:53,878 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:08:53,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:53,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:53,879 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:08:53,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:53,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:08:53,879 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T01:08:53,879 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:08:53,881 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35651 2024-11-19T01:08:53,882 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35651 connecting to ZooKeeper ensemble=127.0.0.1:61906 2024-11-19T01:08:53,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:356510x0, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:08:53,890 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35651-0x101088a107c0000 connected 2024-11-19T01:08:53,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:53,925 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:53,927 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:53,928 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c, hbase.cluster.distributed=false 2024-11-19T01:08:53,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:08:53,933 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35651 2024-11-19T01:08:53,933 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35651 2024-11-19T01:08:53,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35651 2024-11-19T01:08:53,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35651 2024-11-19T01:08:53,937 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35651 2024-11-19T01:08:53,954 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:08:53,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:53,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:53,954 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:08:53,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:53,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:08:53,954 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:08:53,955 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:08:53,955 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46771 2024-11-19T01:08:53,957 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46771 connecting to ZooKeeper ensemble=127.0.0.1:61906 2024-11-19T01:08:53,958 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:53,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:53,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467710x0, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:08:53,965 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46771-0x101088a107c0001 connected 2024-11-19T01:08:53,965 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:08:53,965 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:08:53,970 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:08:53,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T01:08:53,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:08:53,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46771 2024-11-19T01:08:53,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46771 2024-11-19T01:08:53,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46771 2024-11-19T01:08:53,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46771 2024-11-19T01:08:53,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46771 
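The entries above record both the master (port 35651) and the region server (port 46771) instantiating their RPC call queues (default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo) with small handler counts and maxQueueLength=30, plus an 880 MB block cache and the MOB file cache. Queue and handler sizing is driven by the HBase configuration; the exact settings used by this test are not visible in the log, so the values below are only an illustrative sketch of the kind of keys involved:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Total RPC handlers per server; tests commonly shrink this from the default of 30.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Share of the call queues reserved for read requests.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        // Number of call queues as a fraction of the handler count.
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f);
        System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", 30));
      }
    }
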
2024-11-19T01:08:54,000 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5134ffc85563:35651 2024-11-19T01:08:54,000 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5134ffc85563,35651,1731978533878 2024-11-19T01:08:54,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:54,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:54,003 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5134ffc85563,35651,1731978533878 2024-11-19T01:08:54,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T01:08:54,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,011 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:08:54,012 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5134ffc85563,35651,1731978533878 from backup master directory 2024-11-19T01:08:54,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:54,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5134ffc85563,35651,1731978533878 2024-11-19T01:08:54,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:08:54,017 WARN [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
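The entries above trace active-master election through ZooKeeper: the master first registers under /hbase/backup-masters, takes /hbase/master, then deletes its backup-masters znode, while both the master and region server watchers receive the matching NodeCreated/NodeDeleted events on the 127.0.0.1:61906 ensemble. A small diagnostic sketch (plain ZooKeeper client, not HBase code; the quorum address and znode paths are copied from the log) that inspects the same znodes:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class HBaseZnodeSketch {
      public static void main(String[] args) throws Exception {
        // Client port of the MiniZooKeeperCluster as reported in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61906", 30000, event -> { });
        System.out.println("/hbase/master exists: " + (zk.exists("/hbase/master", false) != null));
        System.out.println("/hbase/running exists: " + (zk.exists("/hbase/running", false) != null));
        List<String> backups = zk.getChildren("/hbase/backup-masters", false);
        System.out.println("backup masters: " + backups);
        zk.close();
      }
    }
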
2024-11-19T01:08:54,017 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5134ffc85563,35651,1731978533878 2024-11-19T01:08:54,031 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/hbase.id] with ID: 5e71cc00-0aeb-428b-99fa-45ef16579d8b 2024-11-19T01:08:54,031 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/.tmp/hbase.id 2024-11-19T01:08:54,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:08:54,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:08:54,046 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/.tmp/hbase.id]:[hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/hbase.id] 2024-11-19T01:08:54,062 INFO [master/5134ffc85563:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:54,062 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T01:08:54,063 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
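The cluster ID entries above show the publish pattern FSUtils uses for hbase.id: write the ID to a temporary file under .tmp, then move it to its final location so readers never observe a partially written file. A generic sketch of that write-then-rename pattern with the Hadoop FileSystem API (the paths below are shortened placeholders, not the ones from this run; the UUID is the cluster ID reported in the log):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // placeholder path
        Path dst = new Path("/user/jenkins/test-data/hbase.id");        // placeholder path
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("5e71cc00-0aeb-428b-99fa-45ef16579d8b".getBytes(StandardCharsets.UTF_8));
        }
        // Rename is atomic on HDFS, so readers see either no file or the complete ID.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("could not move " + tmp + " to " + dst);
        }
      }
    }
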
2024-11-19T01:08:54,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:08:54,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:08:54,088 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:08:54,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T01:08:54,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:08:54,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:08:54,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:08:54,101 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store 2024-11-19T01:08:54,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:08:54,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:08:54,121 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:54,121 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:08:54,121 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:54,121 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:54,121 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:08:54,121 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:08:54,121 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
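The two entries above spell out the schema of the local master:store region: four column families (info, proc, rs, state), where only info keeps 3 versions, is pinned in memory and uses ROW_INDEX_V1 block encoding, a ROWCOL bloom filter and an 8 KB block size, while the other families use a single version and the 64 KB default block size. An equivalent descriptor could be expressed with the public client API roughly as follows; this is an illustrative reconstruction from the logged attributes, not the actual MasterRegion code, and only the info and proc families are shown:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor store = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                  // VERSIONS => '3'
                .setInMemory(true)                                  // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)                             // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)                            // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
        System.out.println(store);
      }
    }
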
2024-11-19T01:08:54,121 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978534121Disabling compacts and flushes for region at 1731978534121Disabling writes for close at 1731978534121Writing region close event to WAL at 1731978534121Closed at 1731978534121 2024-11-19T01:08:54,122 WARN [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/.initializing 2024-11-19T01:08:54,123 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878 2024-11-19T01:08:54,126 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C35651%2C1731978533878, suffix=, logDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878, archiveDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/oldWALs, maxLogs=10 2024-11-19T01:08:54,127 INFO [master/5134ffc85563:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C35651%2C1731978533878.1731978534127 2024-11-19T01:08:54,137 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 2024-11-19T01:08:54,142 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35905:35905),(127.0.0.1/127.0.0.1:39515:39515)] 2024-11-19T01:08:54,148 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:08:54,148 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:54,148 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,149 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T01:08:54,153 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T01:08:54,155 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:08:54,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T01:08:54,157 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:08:54,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,159 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T01:08:54,159 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:08:54,160 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,161 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,162 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,163 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,163 DEBUG [master/5134ffc85563:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,164 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T01:08:54,165 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:08:54,168 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:08:54,168 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759167, jitterRate=-0.03466951847076416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T01:08:54,170 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731978534149Initializing all the Stores at 1731978534150 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978534150Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978534150Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978534150Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978534150Cleaning up temporary data from old regions at 1731978534163 (+13 ms)Region opened successfully at 1731978534169 (+6 ms) 2024-11-19T01:08:54,170 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T01:08:54,174 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2242bc7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:08:54,175 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T01:08:54,175 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T01:08:54,175 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T01:08:54,175 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T01:08:54,176 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T01:08:54,176 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T01:08:54,176 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T01:08:54,178 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T01:08:54,179 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T01:08:54,180 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T01:08:54,181 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T01:08:54,181 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T01:08:54,182 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T01:08:54,183 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T01:08:54,184 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T01:08:54,185 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T01:08:54,186 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T01:08:54,187 DEBUG 
[master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T01:08:54,189 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T01:08:54,190 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T01:08:54,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:54,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:08:54,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,192 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5134ffc85563,35651,1731978533878, sessionid=0x101088a107c0000, setting cluster-up flag (Was=false) 2024-11-19T01:08:54,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,199 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T01:08:54,200 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,35651,1731978533878 2024-11-19T01:08:54,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,207 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T01:08:54,208 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,35651,1731978533878 2024-11-19T01:08:54,210 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T01:08:54,211 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:54,212 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T01:08:54,212 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T01:08:54,212 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5134ffc85563,35651,1731978533878 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T01:08:54,213 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:54,213 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:54,213 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:54,213 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:08:54,213 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5134ffc85563:0, corePoolSize=10, maxPoolSize=10 2024-11-19T01:08:54,213 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,214 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:08:54,214 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T01:08:54,214 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731978564214 2024-11-19T01:08:54,214 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T01:08:54,215 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T01:08:54,215 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T01:08:54,215 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T01:08:54,215 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T01:08:54,215 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T01:08:54,215 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,215 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T01:08:54,216 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T01:08:54,216 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:54,216 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T01:08:54,216 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T01:08:54,216 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T01:08:54,216 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T01:08:54,216 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978534216,5,FailOnTimeoutGroup] 2024-11-19T01:08:54,217 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978534216,5,FailOnTimeoutGroup] 2024-11-19T01:08:54,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T01:08:54,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,217 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,217 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T01:08:54,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:08:54,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:08:54,228 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T01:08:54,229 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c 2024-11-19T01:08:54,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:08:54,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:08:54,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:54,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:08:54,243 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:08:54,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:08:54,245 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:08:54,246 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:08:54,248 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:08:54,248 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:08:54,249 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:08:54,249 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:08:54,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740 2024-11-19T01:08:54,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740 2024-11-19T01:08:54,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:08:54,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:08:54,254 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-19T01:08:54,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:08:54,258 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:08:54,258 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877793, jitterRate=0.11617153882980347}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:08:54,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731978534241Initializing all the Stores at 1731978534242 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978534242Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978534242Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978534242Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978534242Cleaning up temporary data from old regions at 1731978534253 (+11 ms)Region opened successfully at 1731978534260 (+7 ms) 2024-11-19T01:08:54,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:08:54,260 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:08:54,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:08:54,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:08:54,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:08:54,261 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:08:54,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978534260Disabling compacts and flushes for region at 1731978534260Disabling writes for close at 1731978534260Writing region close 
event to WAL at 1731978534260Closed at 1731978534261 (+1 ms) 2024-11-19T01:08:54,262 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:54,262 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T01:08:54,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T01:08:54,264 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:08:54,266 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T01:08:54,283 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(746): ClusterId : 5e71cc00-0aeb-428b-99fa-45ef16579d8b 2024-11-19T01:08:54,283 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:08:54,286 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:08:54,286 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T01:08:54,288 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:08:54,288 DEBUG [RS:0;5134ffc85563:46771 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@285791f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:08:54,301 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5134ffc85563:46771 2024-11-19T01:08:54,301 INFO [RS:0;5134ffc85563:46771 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:08:54,301 INFO [RS:0;5134ffc85563:46771 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:08:54,301 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T01:08:54,302 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,35651,1731978533878 with port=46771, startcode=1731978533954 2024-11-19T01:08:54,302 DEBUG [RS:0;5134ffc85563:46771 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:08:54,305 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36935, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:08:54,305 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35651 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,305 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35651 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,307 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c 2024-11-19T01:08:54,307 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40963 2024-11-19T01:08:54,308 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:08:54,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:08:54,310 DEBUG [RS:0;5134ffc85563:46771 {}] zookeeper.ZKUtil(111): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,310 WARN [RS:0;5134ffc85563:46771 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T01:08:54,310 INFO [RS:0;5134ffc85563:46771 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:08:54,310 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,310 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,46771,1731978533954] 2024-11-19T01:08:54,314 INFO [RS:0;5134ffc85563:46771 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:08:54,316 INFO [RS:0;5134ffc85563:46771 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:08:54,317 INFO [RS:0;5134ffc85563:46771 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:08:54,317 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T01:08:54,318 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:08:54,318 INFO [RS:0;5134ffc85563:46771 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:08:54,319 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:08:54,319 DEBUG [RS:0;5134ffc85563:46771 {}] 
executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:08:54,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T01:08:54,320 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,321 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,321 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,322 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,322 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,322 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,46771,1731978533954-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:08:54,345 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:08:54,346 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,46771,1731978533954-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,346 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,346 INFO [RS:0;5134ffc85563:46771 {}] regionserver.Replication(171): 5134ffc85563,46771,1731978533954 started 2024-11-19T01:08:54,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:54,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:54,364 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:08:54,364 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,46771,1731978533954, RpcServer on 5134ffc85563/172.17.0.2:46771, sessionid=0x101088a107c0001 2024-11-19T01:08:54,365 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:08:54,365 DEBUG [RS:0;5134ffc85563:46771 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,365 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,46771,1731978533954' 2024-11-19T01:08:54,365 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:08:54,365 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:08:54,366 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:08:54,366 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:08:54,366 DEBUG [RS:0;5134ffc85563:46771 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,366 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,46771,1731978533954' 2024-11-19T01:08:54,366 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:08:54,367 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:08:54,367 DEBUG [RS:0;5134ffc85563:46771 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:08:54,367 INFO [RS:0;5134ffc85563:46771 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:08:54,367 INFO [RS:0;5134ffc85563:46771 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T01:08:54,416 WARN [5134ffc85563:35651 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-19T01:08:54,470 INFO [RS:0;5134ffc85563:46771 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C46771%2C1731978533954, suffix=, logDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954, archiveDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs, maxLogs=32 2024-11-19T01:08:54,471 INFO [RS:0;5134ffc85563:46771 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.1731978534471 2024-11-19T01:08:54,478 INFO [RS:0;5134ffc85563:46771 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 2024-11-19T01:08:54,479 DEBUG [RS:0;5134ffc85563:46771 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39515:39515),(127.0.0.1/127.0.0.1:35905:35905)] 2024-11-19T01:08:54,666 DEBUG [5134ffc85563:35651 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T01:08:54,667 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,669 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,46771,1731978533954, state=OPENING 2024-11-19T01:08:54,670 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T01:08:54,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:08:54,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:54,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:54,675 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:08:54,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,46771,1731978533954}] 2024-11-19T01:08:54,830 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T01:08:54,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38117, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T01:08:54,837 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T01:08:54,837 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:08:54,839 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C46771%2C1731978533954.meta, suffix=.meta, logDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954, archiveDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs, maxLogs=32 2024-11-19T01:08:54,873 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta 2024-11-19T01:08:54,873 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:08:54,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:54,889 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta 2024-11-19T01:08:54,898 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39515:39515),(127.0.0.1/127.0.0.1:35905:35905)] 2024-11-19T01:08:54,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:54,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:54,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:54,917 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:08:54,918 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T01:08:54,918 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T01:08:54,918 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T01:08:54,918 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T01:08:54,918 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:54,919 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T01:08:54,919 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T01:08:54,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:08:54,927 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:08:54,927 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:08:54,929 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:08:54,929 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:08:54,931 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:08:54,931 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:08:54,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:08:54,933 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:08:54,933 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:54,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T01:08:54,934 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:08:54,935 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740 2024-11-19T01:08:54,936 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740 2024-11-19T01:08:54,938 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:08:54,938 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:08:54,938 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:08:54,940 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:08:54,941 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857837, jitterRate=0.09079721570014954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:08:54,941 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T01:08:54,942 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731978534919Writing region info on filesystem at 1731978534919Initializing all the Stores at 1731978534920 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978534920Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978534925 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978534925Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978534925Cleaning up temporary data from old regions at 1731978534938 (+13 ms)Running coprocessor post-open hooks at 1731978534941 (+3 ms)Region opened successfully at 1731978534941 2024-11-19T01:08:54,943 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731978534829 2024-11-19T01:08:54,946 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T01:08:54,947 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T01:08:54,947 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,948 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,46771,1731978533954, state=OPEN 2024-11-19T01:08:54,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:08:54,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:08:54,953 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5134ffc85563,46771,1731978533954 2024-11-19T01:08:54,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:54,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:08:54,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T01:08:54,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,46771,1731978533954 in 278 msec 2024-11-19T01:08:54,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T01:08:54,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 695 msec 2024-11-19T01:08:54,963 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:08:54,963 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T01:08:54,965 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:08:54,965 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,46771,1731978533954, seqNum=-1] 2024-11-19T01:08:54,966 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:08:54,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56665, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:08:54,978 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 763 msec 2024-11-19T01:08:54,979 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731978534978, completionTime=-1 2024-11-19T01:08:54,979 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T01:08:54,979 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731978594981 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731978654981 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,35651,1731978533878-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,35651,1731978533878-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,35651,1731978533878-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5134ffc85563:35651, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:08:54,981 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,983 DEBUG [master/5134ffc85563:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T01:08:54,985 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.972sec 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,35651,1731978533878-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:08:54,989 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,35651,1731978533878-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T01:08:54,992 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T01:08:54,992 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T01:08:54,992 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,35651,1731978533878-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:08:55,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d1c4f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:08:55,084 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5134ffc85563,35651,-1 for getting cluster id 2024-11-19T01:08:55,084 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T01:08:55,087 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e71cc00-0aeb-428b-99fa-45ef16579d8b' 2024-11-19T01:08:55,087 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T01:08:55,087 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e71cc00-0aeb-428b-99fa-45ef16579d8b" 2024-11-19T01:08:55,088 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d8a8688, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:08:55,088 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5134ffc85563,35651,-1] 2024-11-19T01:08:55,088 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T01:08:55,088 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:08:55,090 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T01:08:55,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@597807df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:08:55,091 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:08:55,093 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,46771,1731978533954, seqNum=-1] 2024-11-19T01:08:55,093 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:08:55,099 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39906, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:08:55,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5134ffc85563,35651,1731978533878 2024-11-19T01:08:55,101 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:55,106 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T01:08:55,122 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:08:55,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:55,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:55,122 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:08:55,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:08:55,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:08:55,122 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:08:55,123 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:08:55,123 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43549 2024-11-19T01:08:55,125 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43549 connecting to ZooKeeper ensemble=127.0.0.1:61906 2024-11-19T01:08:55,125 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:55,127 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:08:55,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:435490x0, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:08:55,131 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:435490x0, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-19T01:08:55,132 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-19T01:08:55,132 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:08:55,133 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43549-0x101088a107c0002 connected 2024-11-19T01:08:55,144 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:08:55,145 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:08:55,147 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:08:55,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43549 2024-11-19T01:08:55,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43549 2024-11-19T01:08:55,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43549 2024-11-19T01:08:55,157 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43549 2024-11-19T01:08:55,157 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43549 2024-11-19T01:08:55,160 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(746): ClusterId : 5e71cc00-0aeb-428b-99fa-45ef16579d8b 2024-11-19T01:08:55,160 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:08:55,163 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:08:55,163 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T01:08:55,175 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:08:55,176 DEBUG [RS:1;5134ffc85563:43549 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@153c0140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:08:55,196 DEBUG [RS:1;5134ffc85563:43549 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;5134ffc85563:43549 2024-11-19T01:08:55,196 INFO [RS:1;5134ffc85563:43549 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:08:55,196 INFO [RS:1;5134ffc85563:43549 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:08:55,196 DEBUG [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T01:08:55,198 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,35651,1731978533878 with port=43549, startcode=1731978535122 2024-11-19T01:08:55,198 DEBUG [RS:1;5134ffc85563:43549 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:08:55,207 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40709, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:08:55,208 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35651 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,43549,1731978535122 2024-11-19T01:08:55,208 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35651 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,43549,1731978535122 2024-11-19T01:08:55,211 DEBUG [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c 2024-11-19T01:08:55,211 DEBUG [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40963 2024-11-19T01:08:55,211 DEBUG [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:08:55,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:08:55,214 DEBUG [RS:1;5134ffc85563:43549 {}] zookeeper.ZKUtil(111): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,43549,1731978535122 2024-11-19T01:08:55,214 WARN [RS:1;5134ffc85563:43549 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T01:08:55,214 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,43549,1731978535122] 2024-11-19T01:08:55,214 INFO [RS:1;5134ffc85563:43549 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:08:55,214 DEBUG [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122 2024-11-19T01:08:55,221 INFO [RS:1;5134ffc85563:43549 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:08:55,222 INFO [RS:1;5134ffc85563:43549 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:08:55,229 INFO [RS:1;5134ffc85563:43549 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:08:55,229 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T01:08:55,232 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:08:55,233 INFO [RS:1;5134ffc85563:43549 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:08:55,233 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,234 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:08:55,235 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:08:55,235 DEBUG [RS:1;5134ffc85563:43549 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:08:55,244 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T01:08:55,244 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,244 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,244 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,244 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,245 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,43549,1731978535122-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:08:55,265 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:08:55,265 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,43549,1731978535122-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,265 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,265 INFO [RS:1;5134ffc85563:43549 {}] regionserver.Replication(171): 5134ffc85563,43549,1731978535122 started 2024-11-19T01:08:55,289 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:08:55,289 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,43549,1731978535122, RpcServer on 5134ffc85563/172.17.0.2:43549, sessionid=0x101088a107c0002 2024-11-19T01:08:55,289 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:08:55,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;5134ffc85563:43549,5,FailOnTimeoutGroup] 2024-11-19T01:08:55,289 DEBUG [RS:1;5134ffc85563:43549 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,43549,1731978535122 2024-11-19T01:08:55,290 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,43549,1731978535122' 2024-11-19T01:08:55,290 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:08:55,290 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-19T01:08:55,290 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T01:08:55,291 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:08:55,292 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 5134ffc85563,35651,1731978533878 2024-11-19T01:08:55,292 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@ceedca5 2024-11-19T01:08:55,292 DEBUG 
[RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T01:08:55,293 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:08:55,293 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:08:55,293 DEBUG [RS:1;5134ffc85563:43549 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,43549,1731978535122 2024-11-19T01:08:55,293 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,43549,1731978535122' 2024-11-19T01:08:55,293 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:08:55,293 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:08:55,294 DEBUG [RS:1;5134ffc85563:43549 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:08:55,294 INFO [RS:1;5134ffc85563:43549 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:08:55,294 INFO [RS:1;5134ffc85563:43549 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T01:08:55,300 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T01:08:55,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35651 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T01:08:55,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35651 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T01:08:55,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35651 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:08:55,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35651 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T01:08:55,305 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T01:08:55,306 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:55,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35651 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-19T01:08:55,307 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T01:08:55,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35651 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:08:55,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741835_1011 (size=393) 2024-11-19T01:08:55,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741835_1011 (size=393) 2024-11-19T01:08:55,335 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8eb7dabc2efd0fda67941096dba183a1, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c 2024-11-19T01:08:55,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41819 is added to blk_1073741836_1012 (size=76) 2024-11-19T01:08:55,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39479 is added to blk_1073741836_1012 (size=76) 2024-11-19T01:08:55,360 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:55,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8eb7dabc2efd0fda67941096dba183a1, disabling compactions & flushes 2024-11-19T01:08:55,360 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:08:55,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:08:55,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. after waiting 0 ms 2024-11-19T01:08:55,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:08:55,360 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:08:55,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8eb7dabc2efd0fda67941096dba183a1: Waiting for close lock at 1731978535360Disabling compacts and flushes for region at 1731978535360Disabling writes for close at 1731978535360Writing region close event to WAL at 1731978535360Closed at 1731978535360 2024-11-19T01:08:55,363 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T01:08:55,363 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731978535363"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731978535363"}]},"ts":"1731978535363"} 2024-11-19T01:08:55,369 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T01:08:55,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T01:08:55,371 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978535371"}]},"ts":"1731978535371"} 2024-11-19T01:08:55,374 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-19T01:08:55,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb7dabc2efd0fda67941096dba183a1, ASSIGN}] 2024-11-19T01:08:55,377 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb7dabc2efd0fda67941096dba183a1, ASSIGN 2024-11-19T01:08:55,378 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb7dabc2efd0fda67941096dba183a1, ASSIGN; state=OFFLINE, location=5134ffc85563,46771,1731978533954; forceNewPlan=false, retain=false 2024-11-19T01:08:55,397 INFO [RS:1;5134ffc85563:43549 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C43549%2C1731978535122, suffix=, logDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122, archiveDir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs, maxLogs=32 2024-11-19T01:08:55,398 INFO [RS:1;5134ffc85563:43549 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C43549%2C1731978535122.1731978535398 2024-11-19T01:08:55,411 INFO [RS:1;5134ffc85563:43549 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 2024-11-19T01:08:55,417 DEBUG [RS:1;5134ffc85563:43549 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35905:35905),(127.0.0.1/127.0.0.1:39515:39515)] 2024-11-19T01:08:55,529 INFO [5134ffc85563:35651 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-19T01:08:55,530 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8eb7dabc2efd0fda67941096dba183a1, regionState=OPENING, regionLocation=5134ffc85563,46771,1731978533954 2024-11-19T01:08:55,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb7dabc2efd0fda67941096dba183a1, ASSIGN because future has completed 2024-11-19T01:08:55,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8eb7dabc2efd0fda67941096dba183a1, server=5134ffc85563,46771,1731978533954}] 2024-11-19T01:08:55,693 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:08:55,693 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8eb7dabc2efd0fda67941096dba183a1, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:08:55,694 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,694 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:08:55,694 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,694 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,696 INFO [StoreOpener-8eb7dabc2efd0fda67941096dba183a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,697 INFO [StoreOpener-8eb7dabc2efd0fda67941096dba183a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8eb7dabc2efd0fda67941096dba183a1 columnFamilyName info 2024-11-19T01:08:55,697 DEBUG [StoreOpener-8eb7dabc2efd0fda67941096dba183a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:08:55,698 INFO [StoreOpener-8eb7dabc2efd0fda67941096dba183a1-1 {}] regionserver.HStore(327): Store=8eb7dabc2efd0fda67941096dba183a1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:08:55,698 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,699 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,699 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,700 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,700 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,702 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,705 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:08:55,705 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8eb7dabc2efd0fda67941096dba183a1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806281, jitterRate=0.0252399742603302}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T01:08:55,705 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:08:55,706 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8eb7dabc2efd0fda67941096dba183a1: Running coprocessor pre-open hook at 1731978535694Writing region info on filesystem at 1731978535694Initializing all the Stores at 1731978535695 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978535695Cleaning up temporary data from old regions at 1731978535700 (+5 ms)Running coprocessor post-open hooks at 1731978535705 (+5 ms)Region opened successfully at 1731978535706 (+1 ms) 2024-11-19T01:08:55,707 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1., pid=6, masterSystemTime=1731978535688 2024-11-19T01:08:55,711 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:08:55,711 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:08:55,712 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8eb7dabc2efd0fda67941096dba183a1, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,46771,1731978533954 2024-11-19T01:08:55,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8eb7dabc2efd0fda67941096dba183a1, server=5134ffc85563,46771,1731978533954 because future has completed 2024-11-19T01:08:55,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T01:08:55,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8eb7dabc2efd0fda67941096dba183a1, server=5134ffc85563,46771,1731978533954 in 182 msec 2024-11-19T01:08:55,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T01:08:55,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8eb7dabc2efd0fda67941096dba183a1, ASSIGN in 344 msec 2024-11-19T01:08:55,733 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T01:08:55,733 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978535733"}]},"ts":"1731978535733"} 2024-11-19T01:08:55,737 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-19T01:08:55,738 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T01:08:55,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 436 msec 2024-11-19T01:08:59,823 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:08:59,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:59,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:59,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:08:59,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:00,314 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-19T01:09:04,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T01:09:04,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T01:09:04,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T01:09:04,320 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-19T01:09:04,321 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:09:04,321 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T01:09:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35651 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:09:05,405 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-19T01:09:05,405 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-19T01:09:05,408 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T01:09:05,408 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:09:05,421 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:05,424 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:05,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:05,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:05,425 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:09:05,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b53e4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:05,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@402062d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:05,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a2a3a4f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir/jetty-localhost-44877-hadoop-hdfs-3_4_1-tests_jar-_-any-2170771455570740805/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:05,543 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fa27241{HTTP/1.1, (http/1.1)}{localhost:44877} 2024-11-19T01:09:05,543 INFO [Time-limited test {}] server.Server(415): Started @116839ms 2024-11-19T01:09:05,544 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:09:05,575 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:05,579 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:05,579 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:05,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:05,580 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:09:05,580 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ca4b7c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:05,580 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5e4864{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:05,639 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data5/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:05,640 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data6/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:05,662 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:09:05,665 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a0072a3ffde50ce with lease ID 0xb474c26cb170589c: Processing first storage report for DS-c71694d8-3b08-4346-9c6b-5a634a56ea22 from datanode DatanodeRegistration(127.0.0.1:46773, datanodeUuid=ffa1a434-7c42-4ec7-ba60-2acd46c9b2b5, infoPort=37835, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:09:05,665 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a0072a3ffde50ce with lease ID 0xb474c26cb170589c: from storage DS-c71694d8-3b08-4346-9c6b-5a634a56ea22 node DatanodeRegistration(127.0.0.1:46773, datanodeUuid=ffa1a434-7c42-4ec7-ba60-2acd46c9b2b5, infoPort=37835, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:05,665 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a0072a3ffde50ce with lease ID 0xb474c26cb170589c: Processing first storage report for DS-7e9d3de5-410e-4fae-ae88-df2ab95a96ba from datanode DatanodeRegistration(127.0.0.1:46773, datanodeUuid=ffa1a434-7c42-4ec7-ba60-2acd46c9b2b5, infoPort=37835, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:09:05,665 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a0072a3ffde50ce with lease ID 0xb474c26cb170589c: from storage DS-7e9d3de5-410e-4fae-ae88-df2ab95a96ba node DatanodeRegistration(127.0.0.1:46773, datanodeUuid=ffa1a434-7c42-4ec7-ba60-2acd46c9b2b5, infoPort=37835, infoSecurePort=0, ipcPort=41767, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:05,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d80e9a4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir/jetty-localhost-40285-hadoop-hdfs-3_4_1-tests_jar-_-any-9819993423447319238/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:05,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c590722{HTTP/1.1, (http/1.1)}{localhost:40285} 2024-11-19T01:09:05,705 INFO [Time-limited test {}] server.Server(415): Started @117001ms 2024-11-19T01:09:05,706 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:09:05,741 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:05,745 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:05,746 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:05,746 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:05,746 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:09:05,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c141b19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:05,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25509568{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:05,788 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:05,788 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:05,811 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:09:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ece5092712c5df2 with lease ID 0xb474c26cb170589d: Processing first storage report for DS-3a4ff687-fb8b-439b-92cd-24b1289c8138 from datanode DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:09:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ece5092712c5df2 with lease ID 0xb474c26cb170589d: from storage DS-3a4ff687-fb8b-439b-92cd-24b1289c8138 node DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ece5092712c5df2 with lease ID 0xb474c26cb170589d: Processing first storage report for DS-385790bf-2612-4bae-9ed2-b1cf4687daec from datanode DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:09:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ece5092712c5df2 with lease ID 0xb474c26cb170589d: from storage DS-385790bf-2612-4bae-9ed2-b1cf4687daec node DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:05,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b3e7853{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir/jetty-localhost-37395-hadoop-hdfs-3_4_1-tests_jar-_-any-539598161839124199/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:05,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65429201{HTTP/1.1, (http/1.1)}{localhost:37395} 2024-11-19T01:09:05,895 INFO [Time-limited test {}] server.Server(415): Started @117191ms 2024-11-19T01:09:05,897 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T01:09:06,022 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data10/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:06,022 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data9/current/BP-397303664-172.17.0.2-1731978532996/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:06,041 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:09:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf416f049a78b4aa with lease ID 0xb474c26cb170589e: Processing first storage report for DS-5be2ab8f-9145-4622-bba0-f414e758fe7a from datanode DatanodeRegistration(127.0.0.1:39661, datanodeUuid=63eefaaf-f597-4184-aa63-4b12358533e7, infoPort=43559, infoSecurePort=0, ipcPort=39495, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:09:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf416f049a78b4aa with lease ID 0xb474c26cb170589e: from storage DS-5be2ab8f-9145-4622-bba0-f414e758fe7a node DatanodeRegistration(127.0.0.1:39661, datanodeUuid=63eefaaf-f597-4184-aa63-4b12358533e7, infoPort=43559, infoSecurePort=0, ipcPort=39495, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T01:09:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf416f049a78b4aa with lease ID 0xb474c26cb170589e: Processing first storage report for DS-ea930115-08e1-41f7-a704-8a5d101db954 from datanode DatanodeRegistration(127.0.0.1:39661, datanodeUuid=63eefaaf-f597-4184-aa63-4b12358533e7, infoPort=43559, infoSecurePort=0, ipcPort=39495, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996) 2024-11-19T01:09:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf416f049a78b4aa with lease ID 0xb474c26cb170589e: from storage DS-ea930115-08e1-41f7-a704-8a5d101db954 node DatanodeRegistration(127.0.0.1:39661, datanodeUuid=63eefaaf-f597-4184-aa63-4b12358533e7, infoPort=43559, infoSecurePort=0, ipcPort=39495, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:06,117 WARN [ResponseProcessor for block BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,117 WARN [ResponseProcessor for block BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,118 WARN [ResponseProcessor for block BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,118 WARN [ResponseProcessor for block BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,118 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 block BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:06,118 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 block BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 
2024-11-19T01:09:06,118 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta block BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK], DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:06,118 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 block BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK], DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:06,118 WARN [PacketResponder: BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41819] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:06,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:45412 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45412 dst: /127.0.0.1:39479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:57808 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41819:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57808 dst: /127.0.0.1:41819 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:57814 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41819:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57814 dst: /127.0.0.1:41819 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:06,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:57786 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41819:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57786 dst: /127.0.0.1:41819 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:45442 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45442 dst: /127.0.0.1:39479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,118 WARN [PacketResponder: BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41819] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:45430 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45430 dst: /127.0.0.1:39479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262522587_22 at /127.0.0.1:45456 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45456 dst: /127.0.0.1:39479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f2859b3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:06,121 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262522587_22 at /127.0.0.1:57842 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41819:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57842 dst: /127.0.0.1:41819 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,122 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b918d2a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:06,122 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:06,122 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eee535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:06,122 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66182b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:06,123 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:06,123 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:09:06,123 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397303664-172.17.0.2-1731978532996 (Datanode Uuid bf6c2955-85f5-4c42-a530-315160b9fc3f) service to localhost/127.0.0.1:40963 2024-11-19T01:09:06,123 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:06,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data3/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:06,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data4/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:06,125 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:06,125 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@20c58ca3 {}] datanode.DataXceiver(331): 127.0.0.1:39479:DataXceiver error processing unknown operation src: /127.0.0.1:55272 dst: /127.0.0.1:39479 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:06,125 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 block BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:06,125 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 block BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,125 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta block BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,126 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 block BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:06,127 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de86657{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:06,128 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:06,128 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:06,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:06,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:06,130 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:06,130 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397303664-172.17.0.2-1731978532996 (Datanode Uuid b2f5f431-118e-4697-a012-633a30fcbc5b) service to localhost/127.0.0.1:40963 2024-11-19T01:09:06,130 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data1/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:06,130 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data2/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:06,130 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T01:09:06,131 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:06,131 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:06,134 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1., hostname=5134ffc85563,46771,1731978533954, seqNum=2] 2024-11-19T01:09:06,136 ERROR [FSHLog-0-hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c-prefix:5134ffc85563,46771,1731978533954 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,136 WARN [FSHLog-0-hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c-prefix:5134ffc85563,46771,1731978533954 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,136 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:06,136 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C46771%2C1731978533954:(num 1731978534471) roll requested 2024-11-19T01:09:06,137 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.1731978546136 2024-11-19T01:09:06,142 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:06,142 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:06,143 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:06,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:06,143 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:06,143 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978546136 2024-11-19T01:09:06,143 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:06,144 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:06,145 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-19T01:09:06,145 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37835:37835),(127.0.0.1/127.0.0.1:43559:43559)] 2024-11-19T01:09:06,145 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 is not closed yet, will try archiving it next time 2024-11-19T01:09:06,145 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-19T01:09:06,145 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 2024-11-19T01:09:06,148 WARN [IPC Server handler 4 on default port 40963 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-19T01:09:06,151 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 after 5ms 2024-11-19T01:09:07,238 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:08,090 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
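Because the old writer could not be closed cleanly, RecoverLeaseFSUtils asks the NameNode to recover the lease on the abandoned WAL file; the first attempt fails while recovery is still in progress (RecoveryId = 1019) and is retried later. A stand-alone sketch of that retry loop, assuming the FileSystem is an HDFS DistributedFileSystem (this is not the HBase utility itself):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryExample {
  /** Retries recoverLease until the NameNode reports the file closed. */
  public static boolean recoverLease(FileSystem fs, Path wal, int attempts, long pauseMs)
      throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem) fs; // assumes HDFS
    for (int attempt = 0; attempt < attempts; attempt++) {
      // recoverLease returns true once the file is closed and its last block
      // finalized; until then the NameNode reports "Lease recovery is in
      // progress", which is what produces the retry entries above.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}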
2024-11-19T01:09:08,145 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:08,146 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978546136 2024-11-19T01:09:08,147 WARN [ResponseProcessor for block BP-397303664-172.17.0.2-1731978532996:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-397303664-172.17.0.2-1731978532996:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:08,147 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978546136 block BP-397303664-172.17.0.2-1731978532996:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:08,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:51242 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:46773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51242 dst: /127.0.0.1:46773 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:08,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:59252 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59252 dst: /127.0.0.1:39661 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
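The ClosedChannelException and "Premature EOF from inputStream" DataXceiver errors above are the datanode-side view of the same event: the test (TestLogRolling, testLogRollOnDatanodeDeath) kills datanodes while the WAL block is being written, so receiver threads on the surviving nodes lose their upstream. A sketch of how such a death is typically simulated with Hadoop's test cluster; the exact MiniDFSCluster usage here is an assumption, not taken from this log:

import org.apache.hadoop.hdfs.MiniDFSCluster;

final class DatanodeDeathSketch {
  /** Stops one datanode so in-flight write pipelines through it break. */
  static void killOneDatanode(MiniDFSCluster dfsCluster) throws Exception {
    // Stopping the datanode tears down its DataXceiver threads, which the
    // surviving pipeline members then report as ClosedChannelException or
    // "Premature EOF from inputStream", as seen in the entries above.
    dfsCluster.stopDataNode(0);
  }
}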
2024-11-19T01:09:08,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a2a3a4f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:08,149 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fa27241{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:08,150 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:08,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@402062d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:08,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b53e4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:08,151 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:08,151 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T01:09:08,151 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397303664-172.17.0.2-1731978532996 (Datanode Uuid ffa1a434-7c42-4ec7-ba60-2acd46c9b2b5) service to localhost/127.0.0.1:40963 2024-11-19T01:09:08,151 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:08,152 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data5/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:08,152 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data6/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:08,152 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:09,238 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:10,090 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:10,145 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:10,146 WARN [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]] 2024-11-19T01:09:10,146 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C46771%2C1731978533954:(num 1731978546136) roll requested 2024-11-19T01:09:10,146 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.1731978550146 2024-11-19T01:09:10,152 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 after 4007ms 2024-11-19T01:09:10,153 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:10,153 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:10,153 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:10,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:10,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:10,153 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978546136 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978550146 2024-11-19T01:09:10,154 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41485:41485),(127.0.0.1/127.0.0.1:43559:43559)] 2024-11-19T01:09:10,154 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 is not closed yet, will try archiving it next time 2024-11-19T01:09:10,154 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978546136 is not closed yet, will try archiving it next time 2024-11-19T01:09:10,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39661 is added to blk_1073741838_1020 (size=2431) 2024-11-19T01:09:10,157 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T01:09:10,556 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 is not closed yet, will try archiving it next time 2024-11-19T01:09:11,239 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,055 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2882550b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39661, datanodeUuid=63eefaaf-f597-4184-aa63-4b12358533e7, infoPort=43559, infoSecurePort=0, ipcPort=39495, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741838_1020 to 127.0.0.1:39479 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,091 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,154 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
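At 01:09:10,146 the roller notices the live pipeline is down to one replica ("Found 1 replicas but expecting no less than 2 replicas") and requests another roll. The decision amounts to comparing the current pipeline size against a tolerable minimum and capping how many consecutive rolls low replication may trigger. A sketch of that check; the property names below are my recollection of the HBase settings and should be treated as assumptions, not quotations from this log:

import org.apache.hadoop.conf.Configuration;

final class LowReplicationRollCheck {
  private final int minTolerableReplication;
  private final int rollLimit;
  private int consecutiveLowReplication = 0;

  LowReplicationRollCheck(Configuration conf, int defaultReplication) {
    this.minTolerableReplication =
        conf.getInt("hbase.regionserver.hlog.tolerable.lowreplication", defaultReplication);
    this.rollLimit = conf.getInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
  }

  /** @param currentPipelineSize live datanodes still acking the WAL block */
  boolean shouldRequestRoll(int currentPipelineSize) {
    if (currentPipelineSize >= minTolerableReplication) {
      consecutiveLowReplication = 0;
      return false;
    }
    // e.g. "Found 1 replicas but expecting no less than 2 replicas."
    // Stop asking for rolls once the limit is hit, to avoid rolling forever
    // while the cluster simply has too few datanodes.
    return ++consecutiveLowReplication <= rollLimit;
  }
}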
2024-11-19T01:09:12,161 WARN [ResponseProcessor for block BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021 java.io.IOException: Bad response ERROR for BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021 from datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,161 WARN [DataStreamer for file /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978550146 block BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:12,161 WARN [PacketResponder: BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39661] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,162 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:47500 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47500 dst: /127.0.0.1:41521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,162 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:59276 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59276 dst: /127.0.0.1:39661 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b3e7853{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:12,163 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65429201{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:12,163 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:12,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25509568{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:12,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c141b19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:12,165 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:12,165 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T01:09:12,165 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:12,165 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397303664-172.17.0.2-1731978532996 (Datanode Uuid 63eefaaf-f597-4184-aa63-4b12358533e7) service to localhost/127.0.0.1:40963 2024-11-19T01:09:12,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data9/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:12,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data10/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:12,166 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:12,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46771 {}] regionserver.HRegion(8855): Flush requested on 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:12,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb7dabc2efd0fda67941096dba183a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:09:12,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of 
the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/f4ffc962ee4b49508c0d39e0147e69f0 is 1080, key is row0002/info:/1731978548153/Put/seqid=0 2024-11-19T01:09:12,199 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41819 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42646 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741840_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741840_1023 to mirror 127.0.0.1:41819 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,199 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741840_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 
2024-11-19T01:09:12,199 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741840_1023 2024-11-19T01:09:12,199 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42646 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741840_1023] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:12,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42646 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741840_1023] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42646 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,202 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:12,205 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,205 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 
2024-11-19T01:09:12,205 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741841_1024 2024-11-19T01:09:12,205 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:12,206 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,207 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:12,207 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741842_1025 2024-11-19T01:09:12,207 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:12,208 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
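The abandon/exclude sequence above (blk_1073741840_1023 through blk_1073741843_1026) is the DFS client repeatedly being offered pipelines that still contain datanodes which are already down, abandoning the block, and asking the NameNode again with those nodes excluded. On small test clusters this behaviour is often softened through the standard HDFS client properties sketched below; verify the keys against your Hadoop version, as this is an illustration rather than the configuration used by this test run:

import org.apache.hadoop.conf.Configuration;

final class DfsClientTestConfig {
  static Configuration relaxedPipelineConfig() {
    Configuration conf = new Configuration();
    // Do not try to find a replacement datanode when one in the pipeline fails.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    // If replacement is attempted anyway, fall back to best effort instead of failing.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    // Two replicas, matching the mini cluster seen in this test run.
    conf.setInt("dfs.replication", 2);
    return conf;
  }
}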
2024-11-19T01:09:12,208 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:12,208 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741843_1026 2024-11-19T01:09:12,209 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:12,210 WARN [IPC Server handler 2 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:12,210 WARN [IPC Server handler 2 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:12,210 WARN [IPC Server handler 2 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:12,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741844_1027 (size=10347) 2024-11-19T01:09:12,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/f4ffc962ee4b49508c0d39e0147e69f0 2024-11-19T01:09:12,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/f4ffc962ee4b49508c0d39e0147e69f0 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/f4ffc962ee4b49508c0d39e0147e69f0 2024-11-19T01:09:12,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/f4ffc962ee4b49508c0d39e0147e69f0, entries=5, sequenceid=11, filesize=10.1 K 2024-11-19T01:09:12,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8eb7dabc2efd0fda67941096dba183a1 in 453ms, sequenceid=11, compaction requested=false 2024-11-19T01:09:12,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46771 {}] regionserver.HRegion(8855): Flush requested on 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:12,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb7dabc2efd0fda67941096dba183a1 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-19T01:09:12,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/2c951c3369ed4cd48eb55e23a15cba52 is 1080, key is row0007/info:/1731978552176/Put/seqid=0 2024-11-19T01:09:12,804 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41819 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,804 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42674 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741845_1028 to mirror 127.0.0.1:41819 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,805 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:12,805 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741845_1028 2024-11-19T01:09:12,805 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42674 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:12,805 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42674 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42674 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,805 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:12,807 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46773 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42676 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741846_1029 to mirror 127.0.0.1:46773 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,808 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:12,808 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741846_1029 2024-11-19T01:09:12,808 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42676 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:12,808 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42676 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42676 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:12,808 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:12,810 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,810 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:12,810 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741847_1030 2024-11-19T01:09:12,810 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:12,811 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:12,811 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:12,811 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741848_1031 2024-11-19T01:09:12,812 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:12,812 WARN [IPC Server handler 2 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:12,812 WARN [IPC Server handler 2 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:12,812 WARN [IPC Server handler 2 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:12,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741849_1032 (size=12506) 2024-11-19T01:09:13,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/2c951c3369ed4cd48eb55e23a15cba52 2024-11-19T01:09:13,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/2c951c3369ed4cd48eb55e23a15cba52 as 
hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52 2024-11-19T01:09:13,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52, entries=7, sequenceid=24, filesize=12.2 K 2024-11-19T01:09:13,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8eb7dabc2efd0fda67941096dba183a1 in 434ms, sequenceid=24, compaction requested=false 2024-11-19T01:09:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-19T01:09:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:13,231 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52 because midkey is the same as first or last row 2024-11-19T01:09:13,239 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,091 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,155 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,155 WARN [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]] 2024-11-19T01:09:14,155 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C46771%2C1731978533954:(num 1731978550146) roll requested 2024-11-19T01:09:14,156 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.1731978554155 2024-11-19T01:09:14,158 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,159 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK], DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:14,159 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741850_1033 2024-11-19T01:09:14,159 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:14,160 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,160 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:14,160 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741851_1034 2024-11-19T01:09:14,161 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:14,162 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,162 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:14,162 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741852_1035 2024-11-19T01:09:14,163 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:14,165 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39479 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42702 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741853_1036 to mirror 127.0.0.1:39479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:14,165 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:14,165 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741853_1036 2024-11-19T01:09:14,166 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42702 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-19T01:09:14,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42702 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42702 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:14,166 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:14,167 WARN [IPC Server handler 4 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:14,167 WARN [IPC Server handler 4 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:14,167 WARN [IPC Server handler 4 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:14,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:14,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:14,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:14,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:14,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:14,170 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978550146 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978554155 2024-11-19T01:09:14,171 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41485:41485)] 2024-11-19T01:09:14,171 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 is not closed yet, will try archiving it next time 2024-11-19T01:09:14,171 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978550146 is not closed yet, will try archiving it next time 2024-11-19T01:09:14,172 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978546136 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs/5134ffc85563%2C46771%2C1731978533954.1731978546136 2024-11-19T01:09:14,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741839_1022 (size=25992) 2024-11-19T01:09:14,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46771 {}] regionserver.HRegion(8855): Flush requested on 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:14,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb7dabc2efd0fda67941096dba183a1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T01:09:14,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/c71fc00262db4f389f8380450b431d6e is 1079, key is tmprow/info:/1731978554215/Put/seqid=0 2024-11-19T01:09:14,223 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:14,223 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:14,223 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741855_1038 2024-11-19T01:09:14,223 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:14,225 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,225 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:14,225 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741856_1039 2024-11-19T01:09:14,225 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:14,227 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41819 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:14,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42720 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741857_1040 to mirror 127.0.0.1:41819 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:14,227 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:14,228 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741857_1040 2024-11-19T01:09:14,228 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42720 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:14,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42720 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42720 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:14,228 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:14,229 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,229 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 
2024-11-19T01:09:14,229 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741858_1041 2024-11-19T01:09:14,230 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:14,230 WARN [IPC Server handler 1 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:14,230 WARN [IPC Server handler 1 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:14,230 WARN [IPC Server handler 1 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:14,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741859_1042 (size=6027) 2024-11-19T01:09:14,573 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 is not closed yet, will try archiving it next time 2024-11-19T01:09:14,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/c71fc00262db4f389f8380450b431d6e 2024-11-19T01:09:14,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/c71fc00262db4f389f8380450b431d6e as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c71fc00262db4f389f8380450b431d6e 2024-11-19T01:09:14,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c71fc00262db4f389f8380450b431d6e, entries=1, sequenceid=34, filesize=5.9 K 2024-11-19T01:09:14,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): 
Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8eb7dabc2efd0fda67941096dba183a1 in 432ms, sequenceid=34, compaction requested=true 2024-11-19T01:09:14,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:14,648 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-19T01:09:14,648 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:14,649 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52 because midkey is the same as first or last row 2024-11-19T01:09:14,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8eb7dabc2efd0fda67941096dba183a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:09:14,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:09:14,649 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:09:14,650 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:09:14,650 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HStore(1541): 8eb7dabc2efd0fda67941096dba183a1/info is initiating minor compaction (all files) 2024-11-19T01:09:14,650 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8eb7dabc2efd0fda67941096dba183a1/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 
2024-11-19T01:09:14,650 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/f4ffc962ee4b49508c0d39e0147e69f0, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c71fc00262db4f389f8380450b431d6e] into tmpdir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp, totalSize=28.2 K 2024-11-19T01:09:14,651 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4ffc962ee4b49508c0d39e0147e69f0, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731978548153 2024-11-19T01:09:14,651 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c951c3369ed4cd48eb55e23a15cba52, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731978552176 2024-11-19T01:09:14,652 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.Compactor(225): Compacting c71fc00262db4f389f8380450b431d6e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731978554215 2024-11-19T01:09:14,664 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8eb7dabc2efd0fda67941096dba183a1#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:09:14,665 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/66b458c548f2493bb3c47a4f7e52fd26 is 1080, key is row0002/info:/1731978548153/Put/seqid=0 2024-11-19T01:09:14,667 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:14,667 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:14,667 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741860_1043 2024-11-19T01:09:14,667 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:14,669 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,669 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:14,669 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741861_1044 2024-11-19T01:09:14,669 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:14,670 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,671 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:14,671 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741862_1045 2024-11-19T01:09:14,671 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:14,673 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41819 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:14,673 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:14,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42756 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741863_1046 to mirror 127.0.0.1:41819 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:14,673 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741863_1046 2024-11-19T01:09:14,674 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42756 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:14,674 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42756 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42756 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:14,674 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:14,674 WARN [IPC Server handler 3 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:14,674 WARN [IPC Server handler 3 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:14,674 WARN [IPC Server handler 3 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:14,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741864_1047 (size=17994) 2024-11-19T01:09:14,684 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/66b458c548f2493bb3c47a4f7e52fd26 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 2024-11-19T01:09:14,690 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8eb7dabc2efd0fda67941096dba183a1/info of 8eb7dabc2efd0fda67941096dba183a1 into 66b458c548f2493bb3c47a4f7e52fd26(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T01:09:14,690 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:14,690 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1., storeName=8eb7dabc2efd0fda67941096dba183a1/info, priority=13, startTime=1731978554649; duration=0sec 2024-11-19T01:09:14,690 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T01:09:14,690 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:14,690 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 because midkey is the same as first or last row 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 because midkey is the same as first or last row 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 because midkey is the same as first or last row 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:09:14,691 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8eb7dabc2efd0fda67941096dba183a1:info 2024-11-19T01:09:14,817 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4708c4ac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer 
BP-397303664-172.17.0.2-1731978532996:blk_1073741844_1027 to 127.0.0.1:46773 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:14,817 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@49cf323d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741849_1032 to 127.0.0.1:39479 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:15,239 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:15,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46771 {}] regionserver.HRegion(8855): Flush requested on 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:15,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb7dabc2efd0fda67941096dba183a1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T01:09:15,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/c709f69821464bf7b5946df4947221ea is 1079, key is tmprow/info:/1731978555636/Put/seqid=0 2024-11-19T01:09:15,646 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:15,646 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:15,646 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741865_1048 2024-11-19T01:09:15,647 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:15,648 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:15,649 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK], DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:15,649 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741866_1049 2024-11-19T01:09:15,649 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:15,651 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:15,651 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:15,651 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741867_1050 2024-11-19T01:09:15,651 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:15,653 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:15,653 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:15,653 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741868_1051 2024-11-19T01:09:15,654 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:15,655 WARN [IPC Server handler 4 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:15,655 WARN [IPC Server handler 4 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:15,655 WARN [IPC Server handler 4 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:15,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741869_1052 (size=6027) 2024-11-19T01:09:15,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/c709f69821464bf7b5946df4947221ea 2024-11-19T01:09:15,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/c709f69821464bf7b5946df4947221ea as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c709f69821464bf7b5946df4947221ea 2024-11-19T01:09:15,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c709f69821464bf7b5946df4947221ea, entries=1, sequenceid=45, filesize=5.9 K 2024-11-19T01:09:15,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8eb7dabc2efd0fda67941096dba183a1 in 42ms, sequenceid=45, compaction requested=false 2024-11-19T01:09:15,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:15,679 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-19T01:09:15,679 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:15,679 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 because midkey is the same as first or last row 2024-11-19T01:09:15,818 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4708c4ac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741859_1042 to 127.0.0.1:39479 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:15,818 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@49cf323d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741839_1022 to 127.0.0.1:46773 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:16,092 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:16,172 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:16,172 WARN [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]] 2024-11-19T01:09:16,172 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C46771%2C1731978533954:(num 1731978554155) roll requested 2024-11-19T01:09:16,172 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.1731978556172 2024-11-19T01:09:16,175 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:16,175 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:16,175 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741870_1053 2024-11-19T01:09:16,176 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:16,179 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46773 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:16,179 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:16,179 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42776 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741871_1054 to mirror 127.0.0.1:46773 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:16,179 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741871_1054 2024-11-19T01:09:16,179 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42776 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T01:09:16,179 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42776 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42776 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:16,180 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:16,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42784 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741872_1055 to mirror 127.0.0.1:39661 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:16,182 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39661 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:16,182 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42784 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-19T01:09:16,182 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:16,182 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741872_1055 2024-11-19T01:09:16,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42784 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42784 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:16,183 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:16,184 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:16,184 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:16,184 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741873_1056 2024-11-19T01:09:16,185 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:16,185 WARN [IPC Server handler 3 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:16,185 WARN [IPC Server handler 3 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:16,186 WARN [IPC Server handler 3 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:16,191 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:16,191 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:16,191 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:16,191 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:16,191 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:16,191 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978554155 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978556172 2024-11-19T01:09:16,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741854_1037 (size=13591) 2024-11-19T01:09:16,195 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41485:41485)] 2024-11-19T01:09:16,196 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 is not closed yet, will try archiving it next time 2024-11-19T01:09:16,196 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978554155 is not closed yet, will try archiving it next time 2024-11-19T01:09:16,196 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978550146 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs/5134ffc85563%2C46771%2C1731978533954.1731978550146 2024-11-19T01:09:16,594 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 is not closed yet, will try archiving it next time 2024-11-19T01:09:17,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46771 {}] regionserver.HRegion(8855): Flush requested on 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:17,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb7dabc2efd0fda67941096dba183a1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T01:09:17,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/7e6102a0dfa54389b501810f02407333 is 1079, key is tmprow/info:/1731978557061/Put/seqid=0 2024-11-19T01:09:17,072 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:17,072 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 
2024-11-19T01:09:17,072 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741875_1058 2024-11-19T01:09:17,073 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:17,075 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:17,075 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:17,075 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741876_1059 2024-11-19T01:09:17,075 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:17,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42804 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741877_1060 to mirror 127.0.0.1:41819 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:17,078 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41819 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:17,078 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42804 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:17,078 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:17,078 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741877_1060 2024-11-19T01:09:17,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42804 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42804 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:17,079 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:17,081 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:17,081 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 
2024-11-19T01:09:17,081 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741878_1061 2024-11-19T01:09:17,082 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:17,083 WARN [IPC Server handler 2 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:17,083 WARN [IPC Server handler 2 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:17,083 WARN [IPC Server handler 2 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:17,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741879_1062 (size=6027) 2024-11-19T01:09:17,240 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:17,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/7e6102a0dfa54389b501810f02407333 2024-11-19T01:09:17,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/7e6102a0dfa54389b501810f02407333 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/7e6102a0dfa54389b501810f02407333 2024-11-19T01:09:17,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/7e6102a0dfa54389b501810f02407333, entries=1, sequenceid=55, filesize=5.9 K 2024-11-19T01:09:17,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8eb7dabc2efd0fda67941096dba183a1 in 441ms, sequenceid=55, compaction requested=true 2024-11-19T01:09:17,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:17,503 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-19T01:09:17,503 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:17,503 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 because midkey is the same as first or last row 2024-11-19T01:09:17,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8eb7dabc2efd0fda67941096dba183a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:09:17,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:09:17,503 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:09:17,504 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:09:17,504 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HStore(1541): 8eb7dabc2efd0fda67941096dba183a1/info is initiating minor compaction (all files) 2024-11-19T01:09:17,505 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
8eb7dabc2efd0fda67941096dba183a1/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:09:17,505 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c709f69821464bf7b5946df4947221ea, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/7e6102a0dfa54389b501810f02407333] into tmpdir=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp, totalSize=29.3 K 2024-11-19T01:09:17,505 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.Compactor(225): Compacting 66b458c548f2493bb3c47a4f7e52fd26, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731978548153 2024-11-19T01:09:17,505 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.Compactor(225): Compacting c709f69821464bf7b5946df4947221ea, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731978555636 2024-11-19T01:09:17,506 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e6102a0dfa54389b501810f02407333, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731978557061 2024-11-19T01:09:17,521 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8eb7dabc2efd0fda67941096dba183a1#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:09:17,522 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/d767fa9652174d3fb492577ff5b69a3d is 1080, key is row0002/info:/1731978548153/Put/seqid=0 2024-11-19T01:09:17,524 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:17,524 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK], DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]) is bad. 2024-11-19T01:09:17,525 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741880_1063 2024-11-19T01:09:17,525 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41819,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK] 2024-11-19T01:09:17,526 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:17,527 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]) is bad. 2024-11-19T01:09:17,527 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741881_1064 2024-11-19T01:09:17,527 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK] 2024-11-19T01:09:17,530 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46773 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:17,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42824 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741882_1065 to mirror 127.0.0.1:46773 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:17,530 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:17,530 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741882_1065 2024-11-19T01:09:17,531 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42824 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:17,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:42824 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42824 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:17,531 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:17,532 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:17,532 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 
2024-11-19T01:09:17,533 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741883_1066 2024-11-19T01:09:17,533 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:17,534 WARN [IPC Server handler 4 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T01:09:17,534 WARN [IPC Server handler 4 on default port 40963 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T01:09:17,534 WARN [IPC Server handler 4 on default port 40963 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T01:09:17,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741884_1067 (size=18097) 2024-11-19T01:09:17,551 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/d767fa9652174d3fb492577ff5b69a3d as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/d767fa9652174d3fb492577ff5b69a3d 2024-11-19T01:09:17,558 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8eb7dabc2efd0fda67941096dba183a1/info of 8eb7dabc2efd0fda67941096dba183a1 into d767fa9652174d3fb492577ff5b69a3d(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T01:09:17,558 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:17,558 INFO [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1., storeName=8eb7dabc2efd0fda67941096dba183a1/info, priority=13, startTime=1731978557503; duration=0sec 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/d767fa9652174d3fb492577ff5b69a3d because midkey is the same as first or last row 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/d767fa9652174d3fb492577ff5b69a3d because midkey is the same as first or last row 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/d767fa9652174d3fb492577ff5b69a3d because midkey is the same as first or last row 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:09:17,559 DEBUG [RS:0;5134ffc85563:46771-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8eb7dabc2efd0fda67941096dba183a1:info 2024-11-19T01:09:17,818 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4708c4ac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer 
BP-397303664-172.17.0.2-1731978532996:blk_1073741869_1052 to 127.0.0.1:39479 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:17,818 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@49cf323d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741864_1047 to 127.0.0.1:41819 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:18,092 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:18,196 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:18,196 WARN [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-19T01:09:18,290 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:18,293 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:18,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:18,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:18,294 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:09:18,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2852206a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:18,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1d8e25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:18,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47bbe019{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/java.io.tmpdir/jetty-localhost-41625-hadoop-hdfs-3_4_1-tests_jar-_-any-9456298086494972832/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:18,411 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f8ca33c{HTTP/1.1, (http/1.1)}{localhost:41625} 2024-11-19T01:09:18,411 INFO [Time-limited test {}] server.Server(415): Started @129707ms 2024-11-19T01:09:18,413 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:09:18,515 WARN [Thread-988 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:09:18,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e5aa013de95a7fb with lease ID 0xb474c26cb170589f: from storage DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b node DatanodeRegistration(127.0.0.1:45827, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=36313, infoSecurePort=0, ipcPort=37831, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T01:09:18,528 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e5aa013de95a7fb with lease ID 0xb474c26cb170589f: from storage DS-572c03a4-0198-4757-b0ce-aacb3fc0872a node DatanodeRegistration(127.0.0.1:45827, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=36313, infoSecurePort=0, ipcPort=37831, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:18,818 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4708c4ac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41521, datanodeUuid=da93b273-27b3-4855-a317-6873de7cd5f5, infoPort=41485, infoSecurePort=0, ipcPort=43465, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741879_1062 to 127.0.0.1:39661 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:18,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741854_1037 (size=13591) 2024-11-19T01:09:19,240 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:20,092 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:20,196 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:20,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741884_1067 (size=18097) 2024-11-19T01:09:21,241 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:22,093 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:22,197 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:23,241 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:23,838 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T01:09:24,093 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:24,197 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:24,215 ERROR [FSHLog-0-hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData-prefix:5134ffc85563,35651,1731978533878 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:24,215 WARN [FSHLog-0-hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData-prefix:5134ffc85563,35651,1731978533878 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:24,215 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C35651%2C1731978533878:(num 1731978534127) roll requested 2024-11-19T01:09:24,216 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C35651%2C1731978533878.1731978564216 2024-11-19T01:09:24,220 WARN [Thread-1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46773 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:24,220 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:43162 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741885_1068 to mirror 127.0.0.1:46773 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:24,220 WARN [Thread-1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK]) is bad. 2024-11-19T01:09:24,220 WARN [Thread-1010 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741885_1068 2024-11-19T01:09:24,220 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:43162 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T01:09:24,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:43162 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43162 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:24,221 WARN [Thread-1010 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46773,DS-c71694d8-3b08-4346-9c6b-5a634a56ea22,DISK] 2024-11-19T01:09:24,225 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:24,225 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:24,225 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:24,225 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:24,225 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:24,226 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978564216 2024-11-19T01:09:24,226 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:24,226 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:24,226 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 2024-11-19T01:09:24,227 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36313:36313),(127.0.0.1/127.0.0.1:41485:41485)] 2024-11-19T01:09:24,227 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 is not closed yet, will try archiving it next time 2024-11-19T01:09:24,227 WARN [IPC Server handler 1 on default port 40963 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 has not been closed. Lease recovery is in progress. RecoveryId = 1070 for block blk_1073741830_1006 2024-11-19T01:09:24,227 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 after 1ms 2024-11-19T01:09:25,241 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:26,198 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:27,243 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:28,198 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:28,230 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 after 4004ms 2024-11-19T01:09:28,546 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3d5ace59 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39479,null,null]) java.net.ConnectException: Call From 5134ffc85563/172.17.0.2 to localhost:40121 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T01:09:28,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741833_1019 (size=455) 2024-11-19T01:09:29,171 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978534471 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs/5134ffc85563%2C46771%2C1731978533954.1731978534471 2024-11-19T01:09:29,173 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978554155 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs/5134ffc85563%2C46771%2C1731978533954.1731978554155 2024-11-19T01:09:29,244 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:29,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741835_1011 (size=393) 2024-11-19T01:09:29,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:09:30,198 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:30,521 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@36b6199f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45827, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=36313, infoSecurePort=0, ipcPort=37831, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741829_1005 to 127.0.0.1:39661 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:30,522 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c2dd6d3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45827, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=36313, infoSecurePort=0, ipcPort=37831, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741827_1003 to 127.0.0.1:39661 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:31,244 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,064 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.1731978572064 2024-11-19T01:09:32,069 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:49776 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741887_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741887_1071 to mirror 127.0.0.1:39661 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:32,069 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39661 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,069 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:49776 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741887_1071] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T01:09:32,069 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:32,069 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741887_1071 2024-11-19T01:09:32,069 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-292974490_22 at /127.0.0.1:49776 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741887_1071] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49776 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:32,070 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:32,075 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,075 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,075 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,075 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,076 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,077 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978556172 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978572064 2024-11-19T01:09:32,079 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36313:36313),(127.0.0.1/127.0.0.1:41485:41485)] 2024-11-19T01:09:32,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741874_1057 (size=12911) 2024-11-19T01:09:32,079 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978556172 is not closed yet, will try archiving it next time 2024-11-19T01:09:32,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46771 {}] regionserver.HRegion(8855): Flush requested on 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:32,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8eb7dabc2efd0fda67941096dba183a1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T01:09:32,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/b849106e7ec445209355222341b0a49c is 1080, key is row0013/info:/1731978572081/Put/seqid=0 2024-11-19T01:09:32,122 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39661 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:32,122 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:49790 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741889_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8]'}, localName='127.0.0.1:41521', datanodeUuid='da93b273-27b3-4855-a317-6873de7cd5f5', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741889_1073 to mirror 127.0.0.1:39661 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:32,122 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41521,DS-3a4ff687-fb8b-439b-92cd-24b1289c8138,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:32,123 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741889_1073 2024-11-19T01:09:32,123 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:49790 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741889_1073] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T01:09:32,123 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:49790 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741889_1073] {}] datanode.DataXceiver(331): 127.0.0.1:41521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49790 dst: /127.0.0.1:41521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:32,123 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741890_1074 (size=8190) 2024-11-19T01:09:32,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741890_1074 (size=8190) 2024-11-19T01:09:32,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/b849106e7ec445209355222341b0a49c 2024-11-19T01:09:32,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/b849106e7ec445209355222341b0a49c as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/b849106e7ec445209355222341b0a49c 2024-11-19T01:09:32,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/b849106e7ec445209355222341b0a49c, entries=3, sequenceid=66, filesize=8.0 K 2024-11-19T01:09:32,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 8eb7dabc2efd0fda67941096dba183a1 in 100ms, sequenceid=66, compaction requested=false 2024-11-19T01:09:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8eb7dabc2efd0fda67941096dba183a1: 2024-11-19T01:09:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-19T01:09:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:09:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/d767fa9652174d3fb492577ff5b69a3d because midkey is the same as first or last row 2024-11-19T01:09:32,199 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(580): 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,199 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-19T01:09:32,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T01:09:32,331 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T01:09:32,332 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:09:32,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:32,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:32,332 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T01:09:32,332 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T01:09:32,332 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=924258338, stopped=false 2024-11-19T01:09:32,332 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5134ffc85563,35651,1731978533878 2024-11-19T01:09:32,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:09:32,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:09:32,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:09:32,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:32,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:32,336 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:09:32,336 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T01:09:32,337 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:09:32,337 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:32,337 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,46771,1731978533954' ***** 2024-11-19T01:09:32,337 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:09:32,337 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,43549,1731978535122' ***** 2024-11-19T01:09:32,337 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:09:32,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:32,338 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:09:32,338 INFO [RS:0;5134ffc85563:46771 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:09:32,338 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:09:32,338 INFO [RS:0;5134ffc85563:46771 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:09:32,338 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(3091): Received CLOSE for 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:32,338 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:09:32,338 INFO [RS:1;5134ffc85563:43549 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:09:32,338 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:09:32,338 INFO [RS:1;5134ffc85563:43549 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:09:32,338 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,43549,1731978535122 2024-11-19T01:09:32,338 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:09:32,338 INFO [RS:1;5134ffc85563:43549 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:09:32,339 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:09:32,339 INFO [RS:1;5134ffc85563:43549 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;5134ffc85563:43549. 
2024-11-19T01:09:32,339 DEBUG [RS:1;5134ffc85563:43549 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:09:32,339 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:09:32,339 DEBUG [RS:1;5134ffc85563:43549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:32,339 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,43549,1731978535122; all regions closed. 2024-11-19T01:09:32,341 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,46771,1731978533954 2024-11-19T01:09:32,341 INFO [RS:0;5134ffc85563:46771 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:09:32,341 INFO [RS:0;5134ffc85563:46771 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5134ffc85563:46771. 
2024-11-19T01:09:32,341 DEBUG [RS:0;5134ffc85563:46771 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:09:32,341 DEBUG [RS:0;5134ffc85563:46771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:32,342 INFO [RS:0;5134ffc85563:46771 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T01:09:32,342 INFO [RS:0;5134ffc85563:46771 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:09:32,342 INFO [RS:0;5134ffc85563:46771 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T01:09:32,342 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8eb7dabc2efd0fda67941096dba183a1, disabling compactions & flushes 2024-11-19T01:09:32,342 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T01:09:32,342 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:09:32,342 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:09:32,342 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. after waiting 0 ms 2024-11-19T01:09:32,342 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 
2024-11-19T01:09:32,342 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8eb7dabc2efd0fda67941096dba183a1 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-19T01:09:32,345 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T01:09:32,345 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1325): Online Regions={8eb7dabc2efd0fda67941096dba183a1=TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T01:09:32,345 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8eb7dabc2efd0fda67941096dba183a1 2024-11-19T01:09:32,345 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:09:32,345 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:09:32,346 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:09:32,346 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:09:32,346 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:09:32,346 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-19T01:09:32,346 ERROR [FSHLog-0-hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c-prefix:5134ffc85563,46771,1731978533954.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,346 WARN [FSHLog-0-hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c-prefix:5134ffc85563,46771,1731978533954.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,347 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C46771%2C1731978533954.meta:.meta(num 1731978534873) roll requested 2024-11-19T01:09:32,347 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C46771%2C1731978533954.meta.1731978572347.meta 2024-11-19T01:09:32,351 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,351 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,351 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,351 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,351 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,356 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,356 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:45827,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:32,356 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741891_1075 2024-11-19T01:09:32,357 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:32,361 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,361 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,361 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 2024-11-19T01:09:32,362 WARN [IPC Server handler 1 on default port 40963 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 has not been closed. Lease recovery is in progress. 
RecoveryId = 1077 for block blk_1073741837_1013 2024-11-19T01:09:32,362 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/89697828bc864027b1ea33d8a3173117 is 1080, key is row0015/info:/1731978572086/Put/seqid=0 2024-11-19T01:09:32,364 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 after 3ms 2024-11-19T01:09:32,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:35894 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741893_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data4]'}, localName='127.0.0.1:45827', datanodeUuid='bf6c2955-85f5-4c42-a530-315160b9fc3f', xmitsInProgress=0}:Exception transferring block BP-397303664-172.17.0.2-1731978532996:blk_1073741893_1078 to mirror 127.0.0.1:39661 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:32,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,379 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:35894 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741893_1078] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-19T01:09:32,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,379 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_944575194_22 at /127.0.0.1:35894 [Receiving block BP-397303664-172.17.0.2-1731978532996:blk_1073741893_1078] {}] datanode.DataXceiver(331): 127.0.0.1:45827:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35894 dst: /127.0.0.1:45827 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:32,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,379 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39661 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,380 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45827,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK], DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 
2024-11-19T01:09:32,380 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741893_1078 2024-11-19T01:09:32,380 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978572347.meta 2024-11-19T01:09:32,380 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:32,381 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,381 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39479,DS-e17ca27b-d886-49fb-ae3d-4459f43db400,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:32,382 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta 2024-11-19T01:09:32,382 WARN [IPC Server handler 3 on default port 40963 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1079 for block blk_1073741834_1010 2024-11-19T01:09:32,383 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta after 1ms 2024-11-19T01:09:32,408 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41485:41485),(127.0.0.1/127.0.0.1:36313:36313)] 2024-11-19T01:09:32,408 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta is not closed yet, will try archiving it next time 2024-11-19T01:09:32,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741894_1080 (size=14660) 2024-11-19T01:09:32,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741894_1080 (size=14660) 2024-11-19T01:09:32,421 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/89697828bc864027b1ea33d8a3173117 2024-11-19T01:09:32,432 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/.tmp/info/89697828bc864027b1ea33d8a3173117 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/89697828bc864027b1ea33d8a3173117 2024-11-19T01:09:32,439 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/info/1c1212954b7040c99144ba6d34c11199 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1./info:regioninfo/1731978535712/Put/seqid=0 2024-11-19T01:09:32,442 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/89697828bc864027b1ea33d8a3173117, entries=9, sequenceid=78, filesize=14.3 K 2024-11-19T01:09:32,444 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 8eb7dabc2efd0fda67941096dba183a1 in 102ms, sequenceid=78, compaction requested=true 2024-11-19T01:09:32,446 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/f4ffc962ee4b49508c0d39e0147e69f0, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c71fc00262db4f389f8380450b431d6e, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c709f69821464bf7b5946df4947221ea, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/7e6102a0dfa54389b501810f02407333] to archive 2024-11-19T01:09:32,447 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T01:09:32,450 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/f4ffc962ee4b49508c0d39e0147e69f0 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/f4ffc962ee4b49508c0d39e0147e69f0 2024-11-19T01:09:32,452 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/2c951c3369ed4cd48eb55e23a15cba52 2024-11-19T01:09:32,454 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/66b458c548f2493bb3c47a4f7e52fd26 2024-11-19T01:09:32,456 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c71fc00262db4f389f8380450b431d6e to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c71fc00262db4f389f8380450b431d6e 2024-11-19T01:09:32,458 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c709f69821464bf7b5946df4947221ea to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/c709f69821464bf7b5946df4947221ea 2024-11-19T01:09:32,460 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/7e6102a0dfa54389b501810f02407333 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/info/7e6102a0dfa54389b501810f02407333 2024-11-19T01:09:32,461 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5134ffc85563:35651 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T01:09:32,461 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f4ffc962ee4b49508c0d39e0147e69f0=10347, 2c951c3369ed4cd48eb55e23a15cba52=12506, 66b458c548f2493bb3c47a4f7e52fd26=17994, c71fc00262db4f389f8380450b431d6e=6027, c709f69821464bf7b5946df4947221ea=6027, 7e6102a0dfa54389b501810f02407333=6027] 2024-11-19T01:09:32,484 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.1731978556172 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs/5134ffc85563%2C46771%2C1731978533954.1731978556172 2024-11-19T01:09:32,505 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8eb7dabc2efd0fda67941096dba183a1/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-19T01:09:32,506 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 2024-11-19T01:09:32,506 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8eb7dabc2efd0fda67941096dba183a1: Waiting for close lock at 1731978572341Running coprocessor pre-close hooks at 1731978572341Disabling compacts and flushes for region at 1731978572341Disabling writes for close at 1731978572342 (+1 ms)Obtaining lock to block concurrent updates at 1731978572342Preparing flush snapshotting stores in 8eb7dabc2efd0fda67941096dba183a1 at 1731978572342Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1731978572342Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. at 1731978572348 (+6 ms)Flushing 8eb7dabc2efd0fda67941096dba183a1/info: creating writer at 1731978572349 (+1 ms)Flushing 8eb7dabc2efd0fda67941096dba183a1/info: appending metadata at 1731978572362 (+13 ms)Flushing 8eb7dabc2efd0fda67941096dba183a1/info: closing flushed file at 1731978572362Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@688fa184: reopening flushed file at 1731978572431 (+69 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 8eb7dabc2efd0fda67941096dba183a1 in 102ms, sequenceid=78, compaction requested=true at 1731978572444 (+13 ms)Writing region close event to WAL at 1731978572486 (+42 ms)Running coprocessor post-close hooks at 1731978572506 (+20 ms)Closed at 1731978572506 2024-11-19T01:09:32,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741895_1081 (size=7089) 2024-11-19T01:09:32,506 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731978535301.8eb7dabc2efd0fda67941096dba183a1. 
2024-11-19T01:09:32,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741895_1081 (size=7089) 2024-11-19T01:09:32,507 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/info/1c1212954b7040c99144ba6d34c11199 2024-11-19T01:09:32,538 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/ns/d091ff8a1f4248f69edf5be4980b4f9f is 43, key is default/ns:d/1731978534968/Put/seqid=0 2024-11-19T01:09:32,545 DEBUG [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T01:09:32,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741896_1082 (size=5153) 2024-11-19T01:09:32,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741896_1082 (size=5153) 2024-11-19T01:09:32,555 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/ns/d091ff8a1f4248f69edf5be4980b4f9f 2024-11-19T01:09:32,582 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/table/672e08ced278485580eee7d0b29caaff is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731978535733/Put/seqid=0 2024-11-19T01:09:32,585 WARN [Thread-1067 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:09:32,585 WARN [Thread-1067 {}] hdfs.DataStreamer(1731): Error Recovery for BP-397303664-172.17.0.2-1731978532996:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK], DatanodeInfoWithStorage[127.0.0.1:45827,DS-03f6f93e-5e80-4a4e-a2c4-eb90b0d52a0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK]) is bad. 2024-11-19T01:09:32,585 WARN [Thread-1067 {}] hdfs.DataStreamer(1850): Abandoning BP-397303664-172.17.0.2-1731978532996:blk_1073741897_1083 2024-11-19T01:09:32,588 WARN [Thread-1067 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39661,DS-5be2ab8f-9145-4622-bba0-f414e758fe7a,DISK] 2024-11-19T01:09:32,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741898_1084 (size=5424) 2024-11-19T01:09:32,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741898_1084 (size=5424) 2024-11-19T01:09:32,602 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/table/672e08ced278485580eee7d0b29caaff 2024-11-19T01:09:32,612 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/info/1c1212954b7040c99144ba6d34c11199 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/info/1c1212954b7040c99144ba6d34c11199 2024-11-19T01:09:32,621 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/info/1c1212954b7040c99144ba6d34c11199, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T01:09:32,623 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/ns/d091ff8a1f4248f69edf5be4980b4f9f as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/ns/d091ff8a1f4248f69edf5be4980b4f9f 2024-11-19T01:09:32,650 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/ns/d091ff8a1f4248f69edf5be4980b4f9f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T01:09:32,652 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/.tmp/table/672e08ced278485580eee7d0b29caaff as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/table/672e08ced278485580eee7d0b29caaff 
2024-11-19T01:09:32,664 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/table/672e08ced278485580eee7d0b29caaff, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T01:09:32,667 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 321ms, sequenceid=11, compaction requested=false 2024-11-19T01:09:32,718 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T01:09:32,719 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:09:32,720 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:09:32,720 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978572345Running coprocessor pre-close hooks at 1731978572345Disabling compacts and flushes for region at 1731978572345Disabling writes for close at 1731978572346 (+1 ms)Obtaining lock to block concurrent updates at 1731978572346Preparing flush snapshotting stores in 1588230740 at 1731978572346Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731978572347 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731978572410 (+63 ms)Flushing 1588230740/info: creating writer at 1731978572410Flushing 1588230740/info: appending metadata at 1731978572438 (+28 ms)Flushing 1588230740/info: closing flushed file at 1731978572438Flushing 1588230740/ns: creating writer at 1731978572515 (+77 ms)Flushing 1588230740/ns: appending metadata at 1731978572537 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731978572537Flushing 1588230740/table: creating writer at 1731978572562 (+25 ms)Flushing 1588230740/table: appending metadata at 1731978572582 (+20 ms)Flushing 1588230740/table: closing flushed file at 1731978572582Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@625ce1ec: reopening flushed file at 1731978572611 (+29 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@398f9370: reopening flushed file at 1731978572621 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f541ef4: reopening flushed file at 1731978572651 (+30 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 321ms, sequenceid=11, compaction requested=false at 1731978572667 (+16 ms)Writing region close event to WAL at 1731978572697 (+30 ms)Running coprocessor post-close hooks at 1731978572719 (+22 ms)Closed at 1731978572720 (+1 ms) 2024-11-19T01:09:32,721 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 
2024-11-19T01:09:32,746 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,46771,1731978533954; all regions closed. 2024-11-19T01:09:32,746 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,747 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,747 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,747 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:32,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741892_1076 (size=825) 2024-11-19T01:09:32,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741892_1076 (size=825) 2024-11-19T01:09:32,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741874_1057 (size=12911) 2024-11-19T01:09:33,245 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T01:09:33,245 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T01:09:33,247 INFO [regionserver/5134ffc85563:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:09:33,375 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T01:09:33,375 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T01:09:33,521 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@36b6199f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45827, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=36313, infoSecurePort=0, ipcPort=37831, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741828_1004 to 127.0.0.1:39661 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:33,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:09:34,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T01:09:34,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:09:34,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T01:09:34,324 INFO [regionserver/5134ffc85563:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:09:34,522 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@36b6199f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45827, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=36313, infoSecurePort=0, ipcPort=37831, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741833_1019 to 127.0.0.1:39661 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:34,522 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c2dd6d3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45827, datanodeUuid=bf6c2955-85f5-4c42-a530-315160b9fc3f, infoPort=36313, infoSecurePort=0, ipcPort=37831, storageInfo=lv=-57;cid=testClusterID;nsid=828015794;c=1731978532996):Failed to transfer BP-397303664-172.17.0.2-1731978532996:blk_1073741826_1002 to 127.0.0.1:39661 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:35,298 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T01:09:35,298 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T01:09:35,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:09:35,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741836_1012 (size=76) 2024-11-19T01:09:36,365 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 after 4004ms 2024-11-19T01:09:36,384 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta after 4002ms 2024-11-19T01:09:37,361 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T01:09:37,366 DEBUG [RS:1;5134ffc85563:43549 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs 2024-11-19T01:09:37,366 INFO [RS:1;5134ffc85563:43549 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C43549%2C1731978535122:(num 1731978535398) 2024-11-19T01:09:37,366 DEBUG [RS:1;5134ffc85563:43549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:37,366 INFO [RS:1;5134ffc85563:43549 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:09:37,367 INFO [RS:1;5134ffc85563:43549 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:09:37,367 INFO [RS:1;5134ffc85563:43549 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T01:09:37,367 INFO [RS:1;5134ffc85563:43549 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T01:09:37,367 INFO [RS:1;5134ffc85563:43549 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:09:37,367 INFO [RS:1;5134ffc85563:43549 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T01:09:37,367 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:09:37,367 INFO [RS:1;5134ffc85563:43549 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:09:37,367 INFO [RS:1;5134ffc85563:43549 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43549 2024-11-19T01:09:37,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:09:37,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:09:37,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,43549,1731978535122 2024-11-19T01:09:37,375 INFO [RS:1;5134ffc85563:43549 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:09:37,376 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,43549,1731978535122] 2024-11-19T01:09:37,379 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,43549,1731978535122 already deleted, retry=false 2024-11-19T01:09:37,379 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,43549,1731978535122 expired; onlineServers=1 2024-11-19T01:09:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:09:37,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43549-0x101088a107c0002, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:09:37,482 INFO [RS:1;5134ffc85563:43549 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:09:37,482 INFO [RS:1;5134ffc85563:43549 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,43549,1731978535122; zookeeper connection closed. 2024-11-19T01:09:37,484 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@670e30b3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@670e30b3 2024-11-19T01:09:37,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:37,748 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T01:09:37,752 DEBUG [RS:0;5134ffc85563:46771 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs 2024-11-19T01:09:37,752 INFO [RS:0;5134ffc85563:46771 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C46771%2C1731978533954.meta:.meta(num 1731978572347) 2024-11-19T01:09:37,753 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:37,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:37,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:37,754 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:37,754 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:37,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741888_1072 (size=14682) 2024-11-19T01:09:37,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741888_1072 (size=14682) 2024-11-19T01:09:37,759 DEBUG [RS:0;5134ffc85563:46771 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs 2024-11-19T01:09:37,759 INFO [RS:0;5134ffc85563:46771 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C46771%2C1731978533954:(num 1731978572064) 2024-11-19T01:09:37,759 DEBUG [RS:0;5134ffc85563:46771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:37,759 INFO [RS:0;5134ffc85563:46771 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:09:37,759 INFO [RS:0;5134ffc85563:46771 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:09:37,760 INFO [RS:0;5134ffc85563:46771 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T01:09:37,760 INFO [RS:0;5134ffc85563:46771 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:09:37,760 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:09:37,760 INFO [RS:0;5134ffc85563:46771 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46771 2024-11-19T01:09:37,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,46771,1731978533954 2024-11-19T01:09:37,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:09:37,762 INFO [RS:0;5134ffc85563:46771 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:09:37,764 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,46771,1731978533954] 2024-11-19T01:09:37,765 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,46771,1731978533954 already deleted, retry=false 2024-11-19T01:09:37,765 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,46771,1731978533954 expired; onlineServers=0 2024-11-19T01:09:37,766 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5134ffc85563,35651,1731978533878' ***** 2024-11-19T01:09:37,766 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T01:09:37,766 INFO [M:0;5134ffc85563:35651 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:09:37,766 INFO [M:0;5134ffc85563:35651 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:09:37,766 DEBUG [M:0;5134ffc85563:35651 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T01:09:37,766 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T01:09:37,766 DEBUG [M:0;5134ffc85563:35651 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T01:09:37,766 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978534216 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978534216,5,FailOnTimeoutGroup] 2024-11-19T01:09:37,766 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978534216 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978534216,5,FailOnTimeoutGroup] 2024-11-19T01:09:37,766 INFO [M:0;5134ffc85563:35651 {}] hbase.ChoreService(370): Chore service for: master/5134ffc85563:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T01:09:37,766 INFO [M:0;5134ffc85563:35651 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:09:37,767 DEBUG [M:0;5134ffc85563:35651 {}] master.HMaster(1795): Stopping service threads 2024-11-19T01:09:37,767 INFO [M:0;5134ffc85563:35651 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T01:09:37,767 INFO [M:0;5134ffc85563:35651 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:09:37,767 INFO [M:0;5134ffc85563:35651 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T01:09:37,767 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T01:09:37,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T01:09:37,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:37,768 DEBUG [M:0;5134ffc85563:35651 {}] zookeeper.ZKUtil(347): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T01:09:37,768 WARN [M:0;5134ffc85563:35651 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T01:09:37,769 INFO [M:0;5134ffc85563:35651 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/.lastflushedseqids 2024-11-19T01:09:37,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741899_1085 (size=130) 2024-11-19T01:09:37,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741899_1085 (size=130) 2024-11-19T01:09:37,780 INFO [M:0;5134ffc85563:35651 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T01:09:37,780 INFO [M:0;5134ffc85563:35651 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T01:09:37,781 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:09:37,781 INFO [M:0;5134ffc85563:35651 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:09:37,781 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:09:37,781 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:09:37,781 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:09:37,781 INFO [M:0;5134ffc85563:35651 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-19T01:09:37,804 DEBUG [M:0;5134ffc85563:35651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49d0d393c29e44c59f516273b13d9a67 is 82, key is hbase:meta,,1/info:regioninfo/1731978534947/Put/seqid=0 2024-11-19T01:09:37,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741900_1086 (size=5672) 2024-11-19T01:09:37,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741900_1086 (size=5672) 2024-11-19T01:09:37,818 INFO [M:0;5134ffc85563:35651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49d0d393c29e44c59f516273b13d9a67 2024-11-19T01:09:37,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:09:37,864 INFO [RS:0;5134ffc85563:46771 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:09:37,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46771-0x101088a107c0001, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:09:37,864 INFO [RS:0;5134ffc85563:46771 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,46771,1731978533954; zookeeper connection closed. 
2024-11-19T01:09:37,868 DEBUG [M:0;5134ffc85563:35651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3a725c462f34842978dab918e690886 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731978535740/Put/seqid=0 2024-11-19T01:09:37,881 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4e78b869 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4e78b869 2024-11-19T01:09:37,882 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-19T01:09:37,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741901_1087 (size=6255) 2024-11-19T01:09:37,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741901_1087 (size=6255) 2024-11-19T01:09:37,901 INFO [M:0;5134ffc85563:35651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3a725c462f34842978dab918e690886 2024-11-19T01:09:37,926 INFO [M:0;5134ffc85563:35651 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3a725c462f34842978dab918e690886 2024-11-19T01:09:37,955 DEBUG [M:0;5134ffc85563:35651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ba4b63df1894e5fbc7bb67b90c1874d is 69, key is 5134ffc85563,43549,1731978535122/rs:state/1731978535209/Put/seqid=0 2024-11-19T01:09:38,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741902_1088 (size=5224) 2024-11-19T01:09:38,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741902_1088 (size=5224) 2024-11-19T01:09:38,002 INFO [M:0;5134ffc85563:35651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ba4b63df1894e5fbc7bb67b90c1874d 2024-11-19T01:09:38,045 DEBUG [M:0;5134ffc85563:35651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6c2c6aa734f5495296a8fec043f7cf44 is 52, key is load_balancer_on/state:d/1731978535103/Put/seqid=0 2024-11-19T01:09:38,050 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:09:38,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741903_1089 (size=5056) 2024-11-19T01:09:38,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:38,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741903_1089 (size=5056) 2024-11-19T01:09:38,089 INFO [M:0;5134ffc85563:35651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6c2c6aa734f5495296a8fec043f7cf44 2024-11-19T01:09:38,099 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49d0d393c29e44c59f516273b13d9a67 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/49d0d393c29e44c59f516273b13d9a67 2024-11-19T01:09:38,107 INFO [M:0;5134ffc85563:35651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/49d0d393c29e44c59f516273b13d9a67, entries=8, sequenceid=60, filesize=5.5 K 2024-11-19T01:09:38,108 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3a725c462f34842978dab918e690886 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3a725c462f34842978dab918e690886 2024-11-19T01:09:38,123 INFO [M:0;5134ffc85563:35651 {}] 
regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3a725c462f34842978dab918e690886 2024-11-19T01:09:38,123 INFO [M:0;5134ffc85563:35651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3a725c462f34842978dab918e690886, entries=6, sequenceid=60, filesize=6.1 K 2024-11-19T01:09:38,125 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9ba4b63df1894e5fbc7bb67b90c1874d as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9ba4b63df1894e5fbc7bb67b90c1874d 2024-11-19T01:09:38,134 INFO [M:0;5134ffc85563:35651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9ba4b63df1894e5fbc7bb67b90c1874d, entries=2, sequenceid=60, filesize=5.1 K 2024-11-19T01:09:38,136 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6c2c6aa734f5495296a8fec043f7cf44 as hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6c2c6aa734f5495296a8fec043f7cf44 2024-11-19T01:09:38,144 INFO [M:0;5134ffc85563:35651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6c2c6aa734f5495296a8fec043f7cf44, entries=1, sequenceid=60, filesize=4.9 K 2024-11-19T01:09:38,145 INFO [M:0;5134ffc85563:35651 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 364ms, sequenceid=60, compaction requested=false 2024-11-19T01:09:38,192 INFO [M:0;5134ffc85563:35651 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:09:38,192 DEBUG [M:0;5134ffc85563:35651 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978577780Disabling compacts and flushes for region at 1731978577780Disabling writes for close at 1731978577781 (+1 ms)Obtaining lock to block concurrent updates at 1731978577781Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731978577781Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731978577782 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731978577782Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731978577782Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731978577803 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731978577803Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731978577841 (+38 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731978577867 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731978577867Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731978577927 (+60 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731978577954 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731978577954Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731978578011 (+57 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731978578044 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731978578044Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@213da61: reopening flushed file at 1731978578098 (+54 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f74e882: reopening flushed file at 1731978578107 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4300b548: reopening flushed file at 1731978578124 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b0b0d1e: reopening flushed file at 1731978578134 (+10 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 364ms, sequenceid=60, compaction requested=false at 1731978578145 (+11 ms)Writing region close event to WAL at 1731978578192 (+47 ms)Closed at 1731978578192 2024-11-19T01:09:38,197 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:38,197 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:38,197 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:38,197 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:38,198 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:38,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41521 is added to blk_1073741886_1069 (size=1045) 2024-11-19T01:09:38,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45827 is added to blk_1073741886_1069 (size=1045) 2024-11-19T01:09:38,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:38,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:38,551 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4e2b5fa0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-397303664-172.17.0.2-1731978532996:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:39479,null,null]) java.net.ConnectException: Call From 5134ffc85563/172.17.0.2 to localhost:40121 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T01:09:39,240 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/WALs/5134ffc85563,35651,1731978533878/5134ffc85563%2C35651%2C1731978533878.1731978534127 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/oldWALs/5134ffc85563%2C35651%2C1731978533878.1731978534127 2024-11-19T01:09:39,244 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/MasterData/oldWALs/5134ffc85563%2C35651%2C1731978533878.1731978534127 to hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/oldWALs/5134ffc85563%2C35651%2C1731978533878.1731978534127$masterlocalwal$ 2024-11-19T01:09:39,244 INFO [M:0;5134ffc85563:35651 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T01:09:39,245 INFO [M:0;5134ffc85563:35651 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35651 2024-11-19T01:09:39,245 INFO [M:0;5134ffc85563:35651 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:09:39,245 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
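The Close-WAL-Writer-0 warnings above come from lease recovery probing whether the WAL file is already closed: RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed through reflection, so once the DFSClient has been shut down the real "Filesystem closed" IOException is wrapped in an InvocationTargetException, which is why the entries print "InvocationTargetException: null" and only show the actual failure under "Caused by". The following is a self-contained illustration of that wrap-and-unwrap behaviour; the FakeFs class is a stand-in, not the HBase or HDFS code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveWrapSketch {
    // Stand-in for DistributedFileSystem.isFileClosed: behaves as if the client were already closed.
    public static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        FakeFs fs = new FakeFs();
        Method isFileClosed = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException wrapped) {
            // The reflective layer reports the wrapper, whose own message is null (as in the log),
            // and the underlying IOException is only reachable as the cause.
            System.out.println("wrapper message: " + wrapped.getMessage()); // null
            System.out.println("real cause     : " + wrapped.getCause());  // java.io.IOException: Filesystem closed
        }
    }
}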
2024-11-19T01:09:39,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:09:39,348 INFO [M:0;5134ffc85563:35651 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:09:39,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35651-0x101088a107c0000, quorum=127.0.0.1:61906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:09:39,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47bbe019{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:39,351 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f8ca33c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:39,351 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:39,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1d8e25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:39,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2852206a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:39,354 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5d753116 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39479,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:40121 , LocalHost:localPort 5134ffc85563/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T01:09:39,354 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
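The interrupted block-recovery call above fails because the datanode is being torn down while the IPC client is still inside its fixed-sleep retry policy (RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS)): the shutdown interrupt arrives during the sleep between attempts and is converted into the InterruptedIOException seen in the stack trace. Below is a plain-Java sketch of that retry-with-fixed-sleep behaviour under an interrupt; it is an illustration only, not Hadoop's Client or RetryPolicy implementation.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.ConnectException;

public class FixedSleepRetrySketch {
    // Retry an action up to maxRetries times with a fixed sleep between attempts. An interrupt
    // delivered during the sleep is surfaced as InterruptedIOException, mirroring the
    // "Interrupted: action=RetryAction(...)" failure in the log.
    static void callWithRetries(Runnable attempt, int maxRetries, long sleepMillis) throws IOException {
        for (int tries = 0; ; tries++) {
            try {
                attempt.run();
                return;
            } catch (RuntimeException connectFailure) {
                if (tries >= maxRetries) {
                    throw new ConnectException("retries exhausted after " + (tries + 1) + " attempts");
                }
                try {
                    Thread.sleep(sleepMillis);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    InterruptedIOException iioe =
                        new InterruptedIOException("Interrupted while waiting to retry attempt " + (tries + 2));
                    iioe.initCause(ie);
                    throw iioe;
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Thread caller = new Thread(() -> {
            try {
                callWithRetries(() -> { throw new RuntimeException("Connection refused"); }, 10, 1000);
            } catch (IOException expected) {
                System.out.println("failed with: " + expected);
            }
        });
        caller.start();
        Thread.sleep(1500);   // let the caller reach a between-retry sleep
        caller.interrupt();   // simulate the shutdown interrupt seen during datanode teardown
        caller.join();
    }
}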
2024-11-19T01:09:39,354 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:39,354 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:39,354 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397303664-172.17.0.2-1731978532996 (Datanode Uuid bf6c2955-85f5-4c42-a530-315160b9fc3f) service to localhost/127.0.0.1:40963 2024-11-19T01:09:39,354 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5d753116 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-397303664-172.17.0.2-1731978532996:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39479,null,null], DatanodeInfoWithStorage[127.0.0.1:45827,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-397303664-172.17.0.2-1731978532996 2024-11-19T01:09:39,355 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5d753116 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45827,null,null]) java.io.IOException: No block pool offer service for bpid=BP-397303664-172.17.0.2-1731978532996 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:39,355 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5d753116 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39479,null,null]) java.io.IOException: No block pool offer service for bpid=BP-397303664-172.17.0.2-1731978532996 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:39,355 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5d753116 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45827,null,null], DatanodeInfoWithStorage[127.0.0.1:39479,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-397303664-172.17.0.2-1731978532996:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45827,null,null], DatanodeInfoWithStorage[127.0.0.1:39479,null,null]] 2024-11-19T01:09:39,355 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data3/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:39,356 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data4/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:39,356 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:39,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d80e9a4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:39,360 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c590722{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:39,360 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:39,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5e4864{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:39,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ca4b7c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:39,362 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:39,362 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:09:39,362 WARN [BP-397303664-172.17.0.2-1731978532996 heartbeating to localhost/127.0.0.1:40963 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397303664-172.17.0.2-1731978532996 (Datanode Uuid da93b273-27b3-4855-a317-6873de7cd5f5) service to localhost/127.0.0.1:40963 2024-11-19T01:09:39,362 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:39,363 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data7/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:39,363 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/cluster_d1059538-4cef-eb18-64cb-7bb2fcc78c73/data/data8/current/BP-397303664-172.17.0.2-1731978532996 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:39,364 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:39,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c00ef51{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:09:39,373 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:39,373 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:39,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:39,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:39,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:39,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:39,386 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T01:09:39,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T01:09:39,435 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fa30cbefdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40963 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39165 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40963 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:40963 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40963 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39165 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:40963 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40963 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40963 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fa30cbefdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40963 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40963 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40963 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40963 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=274 (was 164) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4192 (was 5036) 2024-11-19T01:09:39,445 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=274, ProcessCount=11, AvailableMemoryMB=4192 2024-11-19T01:09:39,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T01:09:39,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.log.dir so I do NOT create it in target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9 2024-11-19T01:09:39,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e20fa4b1-3dc1-4372-93f8-09002221cabd/hadoop.tmp.dir so I do NOT create it in target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9 2024-11-19T01:09:39,446 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254, deleteOnExit=true 2024-11-19T01:09:39,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T01:09:39,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/test.cache.data in system properties and HBase conf 2024-11-19T01:09:39,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T01:09:39,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir in system properties and HBase conf 2024-11-19T01:09:39,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T01:09:39,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T01:09:39,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T01:09:39,447 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T01:09:39,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:09:39,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:09:39,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T01:09:39,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:09:39,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T01:09:39,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/nfs.dump.dir in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T01:09:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T01:09:39,467 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:09:39,549 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:39,554 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:39,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:39,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:39,556 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:09:39,557 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:39,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f1af061{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:39,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a743f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:39,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@fb6244e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir/jetty-localhost-41939-hadoop-hdfs-3_4_1-tests_jar-_-any-9127676459871090152/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:09:39,716 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1915705e{HTTP/1.1, (http/1.1)}{localhost:41939} 2024-11-19T01:09:39,716 INFO [Time-limited test {}] server.Server(415): Started @151012ms 2024-11-19T01:09:39,735 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:09:39,863 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:39,869 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:39,871 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:39,871 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:39,871 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:09:39,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ff32a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:39,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@354edf1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:40,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@482b04db{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir/jetty-localhost-41633-hadoop-hdfs-3_4_1-tests_jar-_-any-7045219991743947758/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:40,040 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cc14744{HTTP/1.1, (http/1.1)}{localhost:41633} 2024-11-19T01:09:40,041 INFO [Time-limited test {}] server.Server(415): Started @151337ms 2024-11-19T01:09:40,043 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:09:40,125 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:40,129 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:40,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:40,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:40,149 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:09:40,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@708201bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:40,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11b4bf4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:40,191 WARN [Thread-1187 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data2/current/BP-1068624987-172.17.0.2-1731978579487/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:40,191 WARN [Thread-1186 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data1/current/BP-1068624987-172.17.0.2-1731978579487/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:40,214 WARN [Thread-1165 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:09:40,216 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7b4eee664ea1d59 with lease ID 0x5ae1cba4fe4df1df: Processing first storage report for DS-74f4d546-c1be-4454-8688-ac7cce9b14fa from datanode DatanodeRegistration(127.0.0.1:40001, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=46093, infoSecurePort=0, ipcPort=42783, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487) 2024-11-19T01:09:40,216 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7b4eee664ea1d59 with lease ID 0x5ae1cba4fe4df1df: from storage DS-74f4d546-c1be-4454-8688-ac7cce9b14fa node DatanodeRegistration(127.0.0.1:40001, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=46093, infoSecurePort=0, ipcPort=42783, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:40,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7b4eee664ea1d59 with lease ID 0x5ae1cba4fe4df1df: Processing first storage report for DS-1ca14107-c1f5-49e6-b1b6-3b373ad17050 from datanode DatanodeRegistration(127.0.0.1:40001, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=46093, infoSecurePort=0, ipcPort=42783, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487) 2024-11-19T01:09:40,217 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7b4eee664ea1d59 with lease ID 0x5ae1cba4fe4df1df: from storage DS-1ca14107-c1f5-49e6-b1b6-3b373ad17050 node DatanodeRegistration(127.0.0.1:40001, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=46093, infoSecurePort=0, ipcPort=42783, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:40,309 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c71ff07{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir/jetty-localhost-37175-hadoop-hdfs-3_4_1-tests_jar-_-any-9302566137707232214/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:40,309 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a9fc515{HTTP/1.1, (http/1.1)}{localhost:37175} 2024-11-19T01:09:40,309 INFO [Time-limited test {}] server.Server(415): Started @151605ms 2024-11-19T01:09:40,313 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:09:40,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:40,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:40,417 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data3/current/BP-1068624987-172.17.0.2-1731978579487/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:40,418 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data4/current/BP-1068624987-172.17.0.2-1731978579487/current, will proceed with Du for space computation calculation, 2024-11-19T01:09:40,443 WARN [Thread-1201 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:09:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f1548a98ea8898e with lease ID 0x5ae1cba4fe4df1e0: Processing first storage report for DS-6915a258-9e19-42b4-aae1-48f3a5f0542c from datanode DatanodeRegistration(127.0.0.1:35567, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=38021, infoSecurePort=0, ipcPort=38435, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487) 2024-11-19T01:09:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f1548a98ea8898e with lease ID 0x5ae1cba4fe4df1e0: from storage DS-6915a258-9e19-42b4-aae1-48f3a5f0542c node DatanodeRegistration(127.0.0.1:35567, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=38021, infoSecurePort=0, ipcPort=38435, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f1548a98ea8898e with lease ID 0x5ae1cba4fe4df1e0: Processing first storage report for DS-5bc4d2fe-2d03-490b-8540-6f0185f950b9 from datanode DatanodeRegistration(127.0.0.1:35567, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=38021, infoSecurePort=0, ipcPort=38435, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487) 2024-11-19T01:09:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f1548a98ea8898e with lease ID 0x5ae1cba4fe4df1e0: from storage DS-5bc4d2fe-2d03-490b-8540-6f0185f950b9 node DatanodeRegistration(127.0.0.1:35567, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=38021, infoSecurePort=0, ipcPort=38435, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:40,460 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9 2024-11-19T01:09:40,467 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/zookeeper_0, clientPort=57473, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T01:09:40,469 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57473 2024-11-19T01:09:40,470 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:40,472 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:40,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:09:40,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:09:40,489 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d with version=8 2024-11-19T01:09:40,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase-staging 2024-11-19T01:09:40,492 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:09:40,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:09:40,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:09:40,492 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:09:40,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:09:40,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:09:40,492 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T01:09:40,492 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:09:40,493 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36495 2024-11-19T01:09:40,496 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36495 connecting to ZooKeeper ensemble=127.0.0.1:57473 2024-11-19T01:09:40,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364950x0, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:09:40,515 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36495-0x101088ac6990000 connected 2024-11-19T01:09:40,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:40,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:40,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:09:40,592 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d, hbase.cluster.distributed=false 2024-11-19T01:09:40,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:09:40,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36495 2024-11-19T01:09:40,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36495 2024-11-19T01:09:40,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36495 2024-11-19T01:09:40,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36495 2024-11-19T01:09:40,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36495 2024-11-19T01:09:40,634 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:09:40,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:09:40,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:09:40,634 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:09:40,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:09:40,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:09:40,634 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:09:40,635 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:09:40,651 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41477 2024-11-19T01:09:40,653 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41477 connecting to ZooKeeper ensemble=127.0.0.1:57473 2024-11-19T01:09:40,656 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:40,660 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:40,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:414770x0, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:09:40,679 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:414770x0, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:09:40,679 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:09:40,687 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41477-0x101088ac6990001 connected 2024-11-19T01:09:40,700 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:09:40,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T01:09:40,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:09:40,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41477 2024-11-19T01:09:40,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41477 2024-11-19T01:09:40,715 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41477 2024-11-19T01:09:40,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41477 2024-11-19T01:09:40,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41477 2024-11-19T01:09:40,745 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5134ffc85563:36495 2024-11-19T01:09:40,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5134ffc85563,36495,1731978580491 2024-11-19T01:09:40,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:09:40,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:09:40,751 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5134ffc85563,36495,1731978580491 2024-11-19T01:09:40,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T01:09:40,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:40,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:40,755 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:09:40,756 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5134ffc85563,36495,1731978580491 from backup master directory 2024-11-19T01:09:40,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:09:40,758 WARN [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T01:09:40,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5134ffc85563,36495,1731978580491 2024-11-19T01:09:40,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:09:40,758 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5134ffc85563,36495,1731978580491 2024-11-19T01:09:40,779 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/hbase.id] with ID: 4e187c4d-3e78-4899-aa68-e9facc4fc1f1 2024-11-19T01:09:40,779 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/.tmp/hbase.id 2024-11-19T01:09:40,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:09:40,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:09:40,803 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/.tmp/hbase.id]:[hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/hbase.id] 2024-11-19T01:09:40,826 INFO [master/5134ffc85563:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:40,826 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T01:09:40,830 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 4ms. 
2024-11-19T01:09:40,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:40,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:09:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:09:40,866 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:09:40,867 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T01:09:40,867 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:09:40,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:09:40,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:09:40,906 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store 2024-11-19T01:09:40,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:09:40,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:09:41,325 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:09:41,325 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:09:41,325 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:09:41,325 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:09:41,326 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:09:41,326 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:09:41,326 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:09:41,326 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978581325Disabling compacts and flushes for region at 1731978581325Disabling writes for close at 1731978581326 (+1 ms)Writing region close event to WAL at 1731978581326Closed at 1731978581326 2024-11-19T01:09:41,327 WARN [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/.initializing 2024-11-19T01:09:41,327 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491 2024-11-19T01:09:41,331 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C36495%2C1731978580491, suffix=, logDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491, archiveDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/oldWALs, maxLogs=10 2024-11-19T01:09:41,331 INFO [master/5134ffc85563:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C36495%2C1731978580491.1731978581331 2024-11-19T01:09:41,352 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 2024-11-19T01:09:41,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:41,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:09:41,394 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38021:38021),(127.0.0.1/127.0.0.1:46093:46093)] 2024-11-19T01:09:41,413 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:09:41,413 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:09:41,414 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,414 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,418 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T01:09:41,418 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,419 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:41,419 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,420 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T01:09:41,421 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,421 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:09:41,421 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,422 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T01:09:41,422 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,423 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:09:41,423 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T01:09:41,424 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:09:41,425 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,426 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,428 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,430 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,430 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,431 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-19T01:09:41,432 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:09:41,434 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:09:41,435 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690666, jitterRate=-0.12177352607250214}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T01:09:41,436 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731978581414Initializing all the Stores at 1731978581415 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978581415Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978581416 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978581416Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978581416Cleaning up temporary data from old regions at 1731978581430 (+14 ms)Region opened successfully at 1731978581436 (+6 ms) 2024-11-19T01:09:41,436 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T01:09:41,440 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab325d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:09:41,441 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-19T01:09:41,441 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T01:09:41,441 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T01:09:41,441 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T01:09:41,442 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T01:09:41,442 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T01:09:41,442 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T01:09:41,447 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T01:09:41,448 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T01:09:41,450 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T01:09:41,450 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T01:09:41,451 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T01:09:41,452 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T01:09:41,453 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T01:09:41,454 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T01:09:41,455 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T01:09:41,456 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T01:09:41,458 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T01:09:41,462 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T01:09:41,463 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T01:09:41,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:09:41,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:09:41,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,467 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5134ffc85563,36495,1731978580491, sessionid=0x101088ac6990000, setting cluster-up flag (Was=false) 2024-11-19T01:09:41,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,476 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T01:09:41,477 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,36495,1731978580491 2024-11-19T01:09:41,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,486 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T01:09:41,487 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,36495,1731978580491 2024-11-19T01:09:41,488 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T01:09:41,490 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T01:09:41,491 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T01:09:41,491 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T01:09:41,491 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5134ffc85563,36495,1731978580491 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T01:09:41,493 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:09:41,493 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:09:41,493 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:09:41,493 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:09:41,493 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5134ffc85563:0, corePoolSize=10, maxPoolSize=10 2024-11-19T01:09:41,493 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,493 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:09:41,494 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,495 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1731978611495 2024-11-19T01:09:41,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T01:09:41,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T01:09:41,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T01:09:41,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T01:09:41,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T01:09:41,496 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T01:09:41,497 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:09:41,497 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T01:09:41,498 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,499 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,499 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T01:09:41,501 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T01:09:41,501 INFO 
[master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T01:09:41,502 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T01:09:41,506 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T01:09:41,506 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T01:09:41,508 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978581506,5,FailOnTimeoutGroup] 2024-11-19T01:09:41,512 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978581508,5,FailOnTimeoutGroup] 2024-11-19T01:09:41,512 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,512 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T01:09:41,512 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,512 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-19T01:09:41,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:09:41,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:09:41,520 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T01:09:41,520 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d 2024-11-19T01:09:41,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:09:41,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:09:41,528 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(746): ClusterId : 4e187c4d-3e78-4899-aa68-e9facc4fc1f1 2024-11-19T01:09:41,528 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:09:41,531 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:09:41,531 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:09:41,531 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-11-19T01:09:41,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:09:41,533 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:09:41,533 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:09:41,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,534 DEBUG [RS:0;5134ffc85563:41477 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502bcdb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:09:41,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:41,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:09:41,535 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:09:41,535 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:41,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:09:41,537 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:09:41,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:41,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:09:41,539 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:09:41,539 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:41,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:41,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:09:41,541 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740 2024-11-19T01:09:41,541 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740 2024-11-19T01:09:41,544 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:09:41,544 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:09:41,544 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:09:41,545 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:09:41,548 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:09:41,548 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811577, jitterRate=0.031974464654922485}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:09:41,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731978581531Initializing all the Stores at 1731978581531Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978581532 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978581532Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978581532Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978581532Cleaning up temporary data from old regions at 1731978581544 (+12 ms)Region opened successfully at 1731978581549 (+5 ms) 2024-11-19T01:09:41,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:09:41,550 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:09:41,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-19T01:09:41,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:09:41,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:09:41,551 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5134ffc85563:41477 2024-11-19T01:09:41,551 INFO [RS:0;5134ffc85563:41477 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:09:41,551 INFO [RS:0;5134ffc85563:41477 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:09:41,551 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T01:09:41,552 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:09:41,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978581550Disabling compacts and flushes for region at 1731978581550Disabling writes for close at 1731978581550Writing region close event to WAL at 1731978581552 (+2 ms)Closed at 1731978581552 2024-11-19T01:09:41,552 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,36495,1731978580491 with port=41477, startcode=1731978580633 2024-11-19T01:09:41,552 DEBUG [RS:0;5134ffc85563:41477 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:09:41,554 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:09:41,554 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T01:09:41,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T01:09:41,556 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:09:41,557 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T01:09:41,564 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59297, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:09:41,565 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36495 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,41477,1731978580633 2024-11-19T01:09:41,565 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36495 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,41477,1731978580633 2024-11-19T01:09:41,568 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d 2024-11-19T01:09:41,568 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43141 2024-11-19T01:09:41,568 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:09:41,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:09:41,574 DEBUG [RS:0;5134ffc85563:41477 {}] zookeeper.ZKUtil(111): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,41477,1731978580633 2024-11-19T01:09:41,574 WARN [RS:0;5134ffc85563:41477 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T01:09:41,574 INFO [RS:0;5134ffc85563:41477 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:09:41,574 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633 2024-11-19T01:09:41,578 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,41477,1731978580633] 2024-11-19T01:09:41,590 INFO [RS:0;5134ffc85563:41477 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:09:41,605 INFO [RS:0;5134ffc85563:41477 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:09:41,613 INFO [RS:0;5134ffc85563:41477 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:09:41,613 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,625 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:09:41,631 INFO [RS:0;5134ffc85563:41477 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:09:41,631 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-19T01:09:41,631 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,631 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,631 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,631 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,631 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,631 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:09:41,631 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,632 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,632 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,632 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,632 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,632 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:09:41,632 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:09:41,632 DEBUG [RS:0;5134ffc85563:41477 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:09:41,658 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,659 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,659 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,659 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-19T01:09:41,659 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,659 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41477,1731978580633-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:09:41,687 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:09:41,687 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41477,1731978580633-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,688 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,688 INFO [RS:0;5134ffc85563:41477 {}] regionserver.Replication(171): 5134ffc85563,41477,1731978580633 started 2024-11-19T01:09:41,708 WARN [5134ffc85563:36495 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T01:09:41,709 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:41,709 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,41477,1731978580633, RpcServer on 5134ffc85563/172.17.0.2:41477, sessionid=0x101088ac6990001 2024-11-19T01:09:41,710 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:09:41,710 DEBUG [RS:0;5134ffc85563:41477 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,41477,1731978580633 2024-11-19T01:09:41,710 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,41477,1731978580633' 2024-11-19T01:09:41,710 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:09:41,711 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:09:41,711 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:09:41,711 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:09:41,711 DEBUG [RS:0;5134ffc85563:41477 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,41477,1731978580633 2024-11-19T01:09:41,711 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,41477,1731978580633' 2024-11-19T01:09:41,712 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:09:41,712 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:09:41,713 DEBUG [RS:0;5134ffc85563:41477 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:09:41,713 INFO [RS:0;5134ffc85563:41477 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:09:41,713 INFO [RS:0;5134ffc85563:41477 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T01:09:41,816 INFO [RS:0;5134ffc85563:41477 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C41477%2C1731978580633, suffix=, logDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633, archiveDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/oldWALs, maxLogs=32 2024-11-19T01:09:41,817 INFO [RS:0;5134ffc85563:41477 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:09:41,859 INFO [RS:0;5134ffc85563:41477 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:09:41,873 DEBUG [RS:0;5134ffc85563:41477 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38021:38021),(127.0.0.1/127.0.0.1:46093:46093)] 2024-11-19T01:09:41,958 DEBUG [5134ffc85563:36495 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T01:09:41,959 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5134ffc85563,41477,1731978580633 2024-11-19T01:09:41,961 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,41477,1731978580633, state=OPENING 2024-11-19T01:09:41,963 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T01:09:41,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:09:41,966 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:09:41,966 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:09:41,968 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:09:41,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,41477,1731978580633}] 2024-11-19T01:09:42,123 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T01:09:42,125 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:53097, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T01:09:42,130 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T01:09:42,130 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:09:42,133 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C41477%2C1731978580633.meta, suffix=.meta, logDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633, archiveDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/oldWALs, maxLogs=32 2024-11-19T01:09:42,133 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta 2024-11-19T01:09:42,149 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta 2024-11-19T01:09:42,157 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46093:46093),(127.0.0.1/127.0.0.1:38021:38021)] 2024-11-19T01:09:42,165 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:09:42,166 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T01:09:42,166 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T01:09:42,167 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T01:09:42,167 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T01:09:42,167 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:09:42,167 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T01:09:42,167 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T01:09:42,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:09:42,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:09:42,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:42,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:42,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:09:42,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:09:42,176 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:42,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:42,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:09:42,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:09:42,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:42,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:09:42,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:09:42,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:09:42,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:42,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T01:09:42,183 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:09:42,184 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740 2024-11-19T01:09:42,185 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740 2024-11-19T01:09:42,187 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:09:42,187 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:09:42,189 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:09:42,190 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:09:42,191 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726527, jitterRate=-0.07617399096488953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:09:42,191 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T01:09:42,193 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731978582167Writing region info on filesystem at 1731978582167Initializing all the Stores at 1731978582169 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978582169Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978582171 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978582171Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978582171Cleaning up temporary data from old regions at 1731978582187 (+16 ms)Running coprocessor post-open hooks at 1731978582191 (+4 ms)Region opened successfully at 1731978582192 (+1 ms) 2024-11-19T01:09:42,194 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731978582122 2024-11-19T01:09:42,198 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T01:09:42,198 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T01:09:42,199 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,41477,1731978580633 2024-11-19T01:09:42,200 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,41477,1731978580633, state=OPEN 2024-11-19T01:09:42,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:09:42,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:09:42,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:09:42,205 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5134ffc85563,41477,1731978580633 2024-11-19T01:09:42,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:09:42,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T01:09:42,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,41477,1731978580633 in 237 msec 2024-11-19T01:09:42,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T01:09:42,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 655 msec 2024-11-19T01:09:42,213 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:09:42,213 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T01:09:42,217 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:09:42,217 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,41477,1731978580633, seqNum=-1] 2024-11-19T01:09:42,218 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:09:42,219 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35881, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:09:42,227 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 735 msec 2024-11-19T01:09:42,228 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731978582228, completionTime=-1 2024-11-19T01:09:42,228 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T01:09:42,228 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T01:09:42,230 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T01:09:42,230 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731978642230 2024-11-19T01:09:42,230 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731978702230 2024-11-19T01:09:42,230 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T01:09:42,231 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,36495,1731978580491-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:42,231 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,36495,1731978580491-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:42,231 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,36495,1731978580491-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:42,231 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5134ffc85563:36495, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:09:42,231 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:42,233 DEBUG [master/5134ffc85563:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T01:09:42,233 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T01:09:42,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.478sec 2024-11-19T01:09:42,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T01:09:42,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T01:09:42,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T01:09:42,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T01:09:42,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T01:09:42,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,36495,1731978580491-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:09:42,238 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,36495,1731978580491-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T01:09:42,241 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T01:09:42,241 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T01:09:42,241 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,36495,1731978580491-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:09:42,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37687418, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:09:42,329 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5134ffc85563,36495,-1 for getting cluster id 2024-11-19T01:09:42,329 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T01:09:42,335 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4e187c4d-3e78-4899-aa68-e9facc4fc1f1' 2024-11-19T01:09:42,336 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T01:09:42,337 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4e187c4d-3e78-4899-aa68-e9facc4fc1f1" 2024-11-19T01:09:42,337 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17607df8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:09:42,337 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5134ffc85563,36495,-1] 2024-11-19T01:09:42,337 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T01:09:42,338 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:09:42,341 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T01:09:42,342 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f231849, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:09:42,342 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:09:42,343 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,41477,1731978580633, seqNum=-1] 2024-11-19T01:09:42,344 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:09:42,346 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54228, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:09:42,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5134ffc85563,36495,1731978580491 2024-11-19T01:09:42,349 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:09:42,352 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T01:09:42,352 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-19T01:09:42,352 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-19T01:09:42,353 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T01:09:42,354 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 5134ffc85563,36495,1731978580491 2024-11-19T01:09:42,355 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3125b3d8 2024-11-19T01:09:42,355 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T01:09:42,363 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47970, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T01:09:42,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36495 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T01:09:42,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36495 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
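[Note] The two TableDescriptorChecker warnings above are expected in this test: the table is deliberately created with a very small region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and log rolls happen quickly. As a rough, hypothetical sketch only (not the test's actual code), a descriptor carrying those per-table values could be built with the HBase client API roughly like this; the class name and the hard-coded values are assumptions taken from the warnings themselves:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallRegionTable {
      public static void main(String[] args) throws IOException {
        // Values chosen to mirror the warnings above; both are far below the
        // defaults, which is exactly what TableDescriptorChecker flags.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432L)      // per-table "hbase.hregion.max.filesize"
            .setMemStoreFlushSize(8192L)  // per-table "hbase.hregion.memstore.flush.size"
            .build();
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          admin.createTable(td);
        }
      }
    }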
2024-11-19T01:09:42,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36495 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:09:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36495 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T01:09:42,371 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T01:09:42,371 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:42,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36495 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-19T01:09:42,374 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T01:09:42,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36495 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:09:42,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:42,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:09:42,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741835_1011 (size=395) 2024-11-19T01:09:42,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741835_1011 (size=395) 2024-11-19T01:09:42,398 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 240a07b85f7d48a22908895ae87cf7f3, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d 2024-11-19T01:09:42,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40001 is added to blk_1073741836_1012 (size=78) 2024-11-19T01:09:42,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35567 is added to blk_1073741836_1012 (size=78) 2024-11-19T01:09:42,410 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:09:42,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 240a07b85f7d48a22908895ae87cf7f3, disabling compactions & flushes 2024-11-19T01:09:42,411 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:09:42,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:09:42,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. after waiting 0 ms 2024-11-19T01:09:42,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:09:42,411 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 
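[Note] On the recurring Close-WAL-Writer WARNs above (they refer to WALs under hdfs://localhost:40963 and the 3ca64890... test-data directory, apparently left over from an earlier mini-cluster in the same test run, while the active cluster uses port 43141): the stack traces show RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through reflection, so when the underlying DFSClient is already closed the IOException("Filesystem closed") surfaces wrapped in an InvocationTargetException, which is what the warning records. A minimal, self-contained illustration of that wrapping in plain Java (not HBase code; the method name here is a stand-in):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectionWrapDemo {
      // Stand-in for a filesystem probe that fails because the client is closed.
      static boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
      }

      public static void main(String[] args) throws Exception {
        Method m = ReflectionWrapDemo.class.getDeclaredMethod("isFileClosed", String.class);
        try {
          m.invoke(null, "/some/wal/file");
        } catch (InvocationTargetException e) {
          // The original IOException is only reachable via getCause(), mirroring the
          // "Caused by: java.io.IOException: Filesystem closed" lines in the log.
          System.out.println("wrapped cause: " + e.getCause());
        }
      }
    }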
2024-11-19T01:09:42,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 240a07b85f7d48a22908895ae87cf7f3: Waiting for close lock at 1731978582411Disabling compacts and flushes for region at 1731978582411Disabling writes for close at 1731978582411Writing region close event to WAL at 1731978582411Closed at 1731978582411 2024-11-19T01:09:42,414 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T01:09:42,414 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731978582414"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731978582414"}]},"ts":"1731978582414"} 2024-11-19T01:09:42,417 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-19T01:09:42,419 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T01:09:42,419 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978582419"}]},"ts":"1731978582419"} 2024-11-19T01:09:42,421 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-19T01:09:42,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240a07b85f7d48a22908895ae87cf7f3, ASSIGN}] 2024-11-19T01:09:42,426 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240a07b85f7d48a22908895ae87cf7f3, ASSIGN 2024-11-19T01:09:42,432 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240a07b85f7d48a22908895ae87cf7f3, ASSIGN; state=OFFLINE, location=5134ffc85563,41477,1731978580633; forceNewPlan=false, retain=false 2024-11-19T01:09:42,583 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=240a07b85f7d48a22908895ae87cf7f3, regionState=OPENING, regionLocation=5134ffc85563,41477,1731978580633 2024-11-19T01:09:42,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240a07b85f7d48a22908895ae87cf7f3, ASSIGN because future has completed 2024-11-19T01:09:42,587 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 240a07b85f7d48a22908895ae87cf7f3, server=5134ffc85563,41477,1731978580633}] 2024-11-19T01:09:42,746 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:09:42,747 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 240a07b85f7d48a22908895ae87cf7f3, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:09:42,747 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,747 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:09:42,747 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,747 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,749 INFO [StoreOpener-240a07b85f7d48a22908895ae87cf7f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,750 INFO [StoreOpener-240a07b85f7d48a22908895ae87cf7f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 240a07b85f7d48a22908895ae87cf7f3 columnFamilyName info 2024-11-19T01:09:42,751 DEBUG [StoreOpener-240a07b85f7d48a22908895ae87cf7f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:09:42,751 INFO [StoreOpener-240a07b85f7d48a22908895ae87cf7f3-1 {}] regionserver.HStore(327): Store=240a07b85f7d48a22908895ae87cf7f3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-19T01:09:42,751 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,752 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,753 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,753 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,753 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,758 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,761 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:09:42,763 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 240a07b85f7d48a22908895ae87cf7f3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713490, jitterRate=-0.09275120496749878}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T01:09:42,763 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:09:42,764 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 240a07b85f7d48a22908895ae87cf7f3: Running coprocessor pre-open hook at 1731978582747Writing region info on filesystem at 1731978582747Initializing all the Stores at 1731978582748 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978582748Cleaning up temporary data from old regions at 1731978582753 (+5 ms)Running coprocessor post-open hooks at 1731978582763 (+10 ms)Region opened successfully at 1731978582764 (+1 ms) 2024-11-19T01:09:42,766 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3., pid=6, masterSystemTime=1731978582741 2024-11-19T01:09:42,769 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:09:42,769 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:09:42,770 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=240a07b85f7d48a22908895ae87cf7f3, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,41477,1731978580633 2024-11-19T01:09:42,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 240a07b85f7d48a22908895ae87cf7f3, server=5134ffc85563,41477,1731978580633 because future has completed 2024-11-19T01:09:42,778 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T01:09:42,778 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 240a07b85f7d48a22908895ae87cf7f3, server=5134ffc85563,41477,1731978580633 in 188 msec 2024-11-19T01:09:42,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T01:09:42,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240a07b85f7d48a22908895ae87cf7f3, ASSIGN in 357 msec 2024-11-19T01:09:42,782 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T01:09:42,782 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978582782"}]},"ts":"1731978582782"} 2024-11-19T01:09:42,785 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-19T01:09:42,786 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T01:09:42,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 420 msec 2024-11-19T01:09:43,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:43,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:44,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T01:09:44,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T01:09:44,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T01:09:44,320 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-19T01:09:44,321 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:09:44,321 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T01:09:44,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:44,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:09:45,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:45,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:46,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:09:46,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:47,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:47,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:09:47,669 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:09:47,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:09:47,700 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T01:09:47,700 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-19T01:09:48,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:48,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:49,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:49,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:50,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:50,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:51,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:51,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:52,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:52,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:52,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36495 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:09:52,455 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-19T01:09:52,455 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-19T01:09:52,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T01:09:52,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:09:52,461 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3., hostname=5134ffc85563,41477,1731978580633, seqNum=2] 2024-11-19T01:09:53,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:53,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:54,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:54,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:54,464 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:09:54,465 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:54,465 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
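The repeated "Failed invocation ... java.lang.reflect.InvocationTargetException: null" WARN entries above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection (the GeneratedMethodAccessor/Method.invoke frames), and every probe fails because the DFSClient behind that old hdfs://localhost:40963 path has already been shut down ("Caused by: java.io.IOException: Filesystem closed"). The wrapper exception itself carries no message, which is why the first line of each trace reads "InvocationTargetException: null". A minimal stand-alone Java sketch of that wrapping behaviour (hypothetical class, not the HBase code):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectionCauseDemo {
        // Stand-in for DistributedFileSystem.isFileClosed(Path): the underlying
        // client has been closed, so the call always fails.
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }

        public static void main(String[] args) throws Exception {
            Method m = ReflectionCauseDemo.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new ReflectionCauseDemo(), "/some/wal/file");
            } catch (InvocationTargetException e) {
                // The reflective wrapper has no message of its own, hence the
                // "InvocationTargetException: null" in the log; the real error is the cause.
                System.out.println("wrapper message: " + e.getMessage()); // null
                System.out.println("cause: " + e.getCause());             // java.io.IOException: Filesystem closed
            }
        }
    }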
2024-11-19T01:09:54,465 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:54,465 WARN [DataStreamer for file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta block BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK], DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]) is bad. 2024-11-19T01:09:54,465 WARN [DataStreamer for file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 block BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK], DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]) is bad. 2024-11-19T01:09:54,466 WARN [DataStreamer for file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 block BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK], DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]) is bad. 2024-11-19T01:09:54,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:55294 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55294 dst: /127.0.0.1:35567 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_188906940_22 at /127.0.0.1:46472 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46472 dst: /127.0.0.1:35567 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_188906940_22 at /127.0.0.1:35982 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35982 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:53616 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53616 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,467 WARN [PacketResponder: BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35567] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:53630 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53630 dst: /127.0.0.1:40001 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:55304 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55304 dst: /127.0.0.1:35567 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
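The 01:09:54 burst above is one datanode (127.0.0.1:35567) going away while three blocks are still being written: each DataStreamer logs "Error Recovery for ... : datanode N(DatanodeInfoWithStorage[...]) is bad." and the datanodes log the matching DataXceiver WRITE_BLOCK failures (ClosedChannelException on the dying node, "Premature EOF from inputStream" on its downstream peer). When reading a log like this it can help to grep out which datanode each pipeline dropped; the small helper below targets the exact DataStreamer(1731) wording shown here (a hypothetical helper, and the message format may differ in other Hadoop versions):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class BadDatanodeGrep {
        // Captures the block id and the datanode marked "bad" from a DataStreamer
        // "Error Recovery" line like the ones above.
        private static final Pattern BAD_DN = Pattern.compile(
            "Error Recovery for (\\S+) in pipeline .*?: datanode \\d+\\(DatanodeInfoWithStorage\\[([^,\\]]+)");

        public static void main(String[] args) {
            String sample = "Error Recovery for BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009"
                + " in pipeline [DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK],"
                + " DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]]:"
                + " datanode 0(DatanodeInfoWithStorage[127.0.0.1:35567,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]) is bad.";
            Matcher m = BAD_DN.matcher(sample);
            if (m.find()) {
                System.out.println("block=" + m.group(1) + " badDatanode=" + m.group(2));
            }
        }
    }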
2024-11-19T01:09:54,475 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c71ff07{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:54,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a9fc515{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:54,476 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:54,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11b4bf4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:54,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@708201bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:54,478 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:54,478 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T01:09:54,478 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1068624987-172.17.0.2-1731978579487 (Datanode Uuid 54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88) service to localhost/127.0.0.1:43141 2024-11-19T01:09:54,478 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:54,478 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data3/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:54,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data4/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:54,479 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:54,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:54,564 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:54,573 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:54,573 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:54,573 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:09:54,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aaa2828{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:54,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@235b3635{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:54,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fde6484{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir/jetty-localhost-45547-hadoop-hdfs-3_4_1-tests_jar-_-any-6094979941702145420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:54,773 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e01093b{HTTP/1.1, (http/1.1)}{localhost:45547} 2024-11-19T01:09:54,773 INFO [Time-limited test {}] server.Server(415): Started @166069ms 2024-11-19T01:09:54,775 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:09:54,866 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:54,867 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:54,867 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:54,867 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:49992 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49992 dst: /127.0.0.1:40001 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T01:09:54,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:49976 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49976 dst: /127.0.0.1:40001 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_188906940_22 at /127.0.0.1:49994 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49994 dst: /127.0.0.1:40001 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:09:54,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@482b04db{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:54,889 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cc14744{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:09:54,889 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:09:54,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@354edf1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:09:54,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ff32a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,STOPPED} 2024-11-19T01:09:54,893 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:09:54,893 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1068624987-172.17.0.2-1731978579487 (Datanode Uuid d0d06abc-686e-4ffd-af0f-d2b6f11995bc) service to localhost/127.0.0.1:43141 2024-11-19T01:09:54,894 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data1/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread 
Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:54,894 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data2/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:09:54,894 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:09:54,896 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T01:09:54,896 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:09:54,948 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:09:54,978 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:09:55,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:09:55,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:09:55,018 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:09:55,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12096ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:09:55,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@673d3bba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:09:55,109 WARN [Thread-1336 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:09:55,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82db33a396186f21 with lease ID 0x5ae1cba4fe4df1e1: from storage DS-6915a258-9e19-42b4-aae1-48f3a5f0542c node DatanodeRegistration(127.0.0.1:39925, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=45983, infoSecurePort=0, ipcPort=40847, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:55,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82db33a396186f21 with lease ID 0x5ae1cba4fe4df1e1: from storage DS-5bc4d2fe-2d03-490b-8540-6f0185f950b9 node DatanodeRegistration(127.0.0.1:39925, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=45983, infoSecurePort=0, ipcPort=40847, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:55,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4da0cc4f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir/jetty-localhost-42997-hadoop-hdfs-3_4_1-tests_jar-_-any-12813152178885699576/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:09:55,215 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25ea055e{HTTP/1.1, (http/1.1)}{localhost:42997} 2024-11-19T01:09:55,215 INFO [Time-limited test {}] server.Server(415): Started @166511ms 2024-11-19T01:09:55,216 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:09:55,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:55,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:55,452 WARN [Thread-1367 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:09:55,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19b9cfd3fa619237 with lease ID 0x5ae1cba4fe4df1e2: from storage DS-74f4d546-c1be-4454-8688-ac7cce9b14fa node DatanodeRegistration(127.0.0.1:45065, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=46675, infoSecurePort=0, ipcPort=41929, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:55,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19b9cfd3fa619237 with lease ID 0x5ae1cba4fe4df1e2: from storage DS-1ca14107-c1f5-49e6-b1b6-3b373ad17050 node DatanodeRegistration(127.0.0.1:45065, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=46675, infoSecurePort=0, ipcPort=41929, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:09:56,358 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-19T01:09:56,361 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-19T01:09:56,362 ERROR [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:56,363 WARN [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
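Just before the append failure above, the test has already confirmed the data survived the restart: "Data Nodes restarted" followed by "Validated row row1002". That validation is simply a Get against the test table; a rough sketch of its shape (placeholder configuration, not the AbstractTestLogRolling code itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ValidateRow {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // would point at the mini cluster in the test
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
                Result r = table.get(new Get(Bytes.toBytes("row1002")));
                System.out.println("row1002 present: " + !r.isEmpty());
            }
        }
    }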
2024-11-19T01:09:56,363 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C41477%2C1731978580633:(num 1731978581817) roll requested 2024-11-19T01:09:56,363 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:09:56,373 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 newFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:09:56,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:56,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:56,373 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:56,373 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:56,374 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:09:56,374 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:09:56,374 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:09:56,375 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
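The entries that follow show what happens to the old WAL once the roll has succeeded but the old writer could not be closed cleanly: the Close-WAL-Writer thread asks the NameNode to recover the lease on the previous file, the NameNode answers "Lease recovery is in progress. RecoveryId = 1017", and the utility keeps polling until the file is reported closed. Stripped of the reflection and retry bookkeeping, that pattern is a recoverLease call followed by isFileClosed probes; a simplified sketch assuming a reachable cluster (not the actual RecoverLeaseFSUtils implementation):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoveryPoll {
        // Ask the NameNode to start lease recovery on a WAL file, then poll until the
        // file is closed or the deadline passes. Simplified; timings are illustrative.
        static boolean recoverAndWait(DistributedFileSystem dfs, Path wal, long timeoutMs) throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            boolean recovered = dfs.recoverLease(wal); // often false at first: "recovery is in progress"
            while (!recovered && System.currentTimeMillis() < deadline) {
                Thread.sleep(1000);                    // back off between probes
                if (dfs.isFileClosed(wal)) {
                    return true;                       // NameNode finished recovery
                }
                recovered = dfs.recoverLease(wal);     // nudge recovery again
            }
            return recovered;
        }

        public static void main(String[] args) throws Exception {
            // Placeholder URI and path for illustration only.
            Configuration conf = new Configuration();
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43141"), conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                System.out.println(recoverAndWait(dfs, new Path("/path/to/old.wal"), 60_000L));
            }
        }
    }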
2024-11-19T01:09:56,375 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:09:56,375 WARN [IPC Server handler 0 on default port 43141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-19T01:09:56,376 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 after 0ms 2024-11-19T01:09:56,381 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46675:46675),(127.0.0.1/127.0.0.1:45983:45983)] 2024-11-19T01:09:56,381 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 is not closed yet, will try archiving it next time 2024-11-19T01:09:56,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:56,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:57,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:57,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:58,121 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T01:09:58,386 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-19T01:09:58,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:58,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:59,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:09:59,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:00,377 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 after 4002ms 2024-11-19T01:10:00,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:00,392 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:00,394 WARN [DataStreamer for file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 block BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45065,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK], DatanodeInfoWithStorage[127.0.0.1:39925,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45065,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]) is bad. 2024-11-19T01:10:00,393 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:37184 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45065:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37184 dst: /127.0.0.1:45065 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:10:00,394 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:37820 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37820 dst: /127.0.0.1:39925 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:10:00,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4da0cc4f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:00,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:00,400 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25ea055e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:10:00,400 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:10:00,401 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@673d3bba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:10:00,401 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12096ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,STOPPED} 2024-11-19T01:10:00,403 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:10:00,403 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:10:00,403 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:10:00,403 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1068624987-172.17.0.2-1731978579487 (Datanode Uuid d0d06abc-686e-4ffd-af0f-d2b6f11995bc) service to localhost/127.0.0.1:43141 2024-11-19T01:10:00,403 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data1/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:00,404 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data2/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:00,404 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:10:00,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:10:00,432 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:10:00,449 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:10:00,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:10:00,450 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:10:00,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c5aa216{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:10:00,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45604664{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:10:00,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3719e37e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir/jetty-localhost-44339-hadoop-hdfs-3_4_1-tests_jar-_-any-11231610209916975481/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:00,591 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45c55ac5{HTTP/1.1, (http/1.1)}{localhost:44339} 2024-11-19T01:10:00,591 INFO [Time-limited test {}] server.Server(415): Started @171887ms 2024-11-19T01:10:00,593 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:10:00,639 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:00,640 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-165739279_22 at /127.0.0.1:37836 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37836 dst: /127.0.0.1:39925 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:10:00,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fde6484{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:00,663 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e01093b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:10:00,663 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:10:00,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@235b3635{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:10:00,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aaa2828{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,STOPPED} 2024-11-19T01:10:00,665 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:10:00,665 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:10:00,665 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:10:00,666 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1068624987-172.17.0.2-1731978579487 (Datanode Uuid 54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88) service to localhost/127.0.0.1:43141 2024-11-19T01:10:00,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data4/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:00,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data3/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:00,667 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:10:00,680 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:10:00,685 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:10:00,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:10:00,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:10:00,690 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:10:00,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7180ac25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:10:00,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff0f915{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:10:00,724 WARN [Thread-1410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:10:00,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1ae156eeca0bbbe with lease ID 0x5ae1cba4fe4df1e3: from storage DS-74f4d546-c1be-4454-8688-ac7cce9b14fa node DatanodeRegistration(127.0.0.1:45201, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=39689, infoSecurePort=0, ipcPort=36905, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:10:00,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1ae156eeca0bbbe with lease ID 0x5ae1cba4fe4df1e3: from storage DS-1ca14107-c1f5-49e6-b1b6-3b373ad17050 node DatanodeRegistration(127.0.0.1:45201, datanodeUuid=d0d06abc-686e-4ffd-af0f-d2b6f11995bc, infoPort=39689, infoSecurePort=0, ipcPort=36905, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T01:10:00,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10aee185{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/java.io.tmpdir/jetty-localhost-35821-hadoop-hdfs-3_4_1-tests_jar-_-any-6119528697369895137/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:00,824 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39c5f69b{HTTP/1.1, (http/1.1)}{localhost:35821} 2024-11-19T01:10:00,824 INFO [Time-limited test {}] server.Server(415): Started @172120ms 2024-11-19T01:10:00,827 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:10:00,940 WARN [Thread-1441 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:10:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b80db53bbf0cc7e with lease ID 0x5ae1cba4fe4df1e4: from storage DS-6915a258-9e19-42b4-aae1-48f3a5f0542c node DatanodeRegistration(127.0.0.1:33989, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=34081, infoSecurePort=0, ipcPort=38915, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:10:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b80db53bbf0cc7e with lease ID 0x5ae1cba4fe4df1e4: from storage DS-5bc4d2fe-2d03-490b-8540-6f0185f950b9 node DatanodeRegistration(127.0.0.1:33989, datanodeUuid=54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88, infoPort=34081, infoSecurePort=0, ipcPort=38915, storageInfo=lv=-57;cid=testClusterID;nsid=33876494;c=1731978579487), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:10:01,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:01,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:01,859 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-19T01:10:01,861 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-19T01:10:01,863 ERROR [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39925,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:10:01,863 WARN [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39925,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:01,863 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C41477%2C1731978580633:(num 1731978596363) roll requested 2024-11-19T01:10:01,864 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41477%2C1731978580633.1731978601863 2024-11-19T01:10:01,873 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 newFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 2024-11-19T01:10:01,874 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:01,874 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:01,874 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:01,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:01,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:01,874 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 2024-11-19T01:10:01,875 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39925,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:10:01,875 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39925,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:01,875 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:10:01,876 WARN [IPC Server handler 2 on default port 43141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-19T01:10:01,876 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 after 1ms 2024-11-19T01:10:01,884 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689),(127.0.0.1/127.0.0.1:34081:34081)] 2024-11-19T01:10:01,884 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 is not closed yet, will try archiving it next time 2024-11-19T01:10:02,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:02,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:03,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:03,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:03,887 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:03,893 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 newFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:03,893 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:03,893 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:03,893 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:03,894 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:03,894 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:03,894 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:03,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741838_1019 (size=1264) 2024-11-19T01:10:03,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741838_1019 (size=1264) 2024-11-19T01:10:03,896 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 is not closed yet, will try archiving it next time 2024-11-19T01:10:03,897 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689),(127.0.0.1/127.0.0.1:34081:34081)] 2024-11-19T01:10:03,897 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 is not closed yet, will try archiving it next time 2024-11-19T01:10:03,897 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:10:03,897 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:10:03,898 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 after 1ms 2024-11-19T01:10:03,898 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:10:03,908 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731978582764/Put/vlen=218/seqid=0] 2024-11-19T01:10:03,908 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731978592462/Put/vlen=1045/seqid=0] 2024-11-19T01:10:03,908 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978581817 2024-11-19T01:10:03,908 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:10:03,908 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:10:03,909 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 after 1ms 2024-11-19T01:10:03,909 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:10:03,913 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731978596362/Put/vlen=1045/seqid=0] 2024-11-19T01:10:03,913 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731978598388/Put/vlen=1045/seqid=0] 2024-11-19T01:10:03,913 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 2024-11-19T01:10:03,913 DEBUG [Time-limited test {}] wal.TestLogRolling(403): 
recovering lease for hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 2024-11-19T01:10:03,913 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 2024-11-19T01:10:03,925 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 after 12ms 2024-11-19T01:10:03,925 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978601863 2024-11-19T01:10:03,930 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731978601863/Put/vlen=1045/seqid=0] 2024-11-19T01:10:03,930 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:03,930 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:03,931 WARN [IPC Server handler 1 on default port 43141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-19T01:10:03,931 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 after 1ms 2024-11-19T01:10:04,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:04,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:04,727 WARN [ResponseProcessor for block BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:04,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_188906940_22 at /127.0.0.1:44542 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44542 dst: /127.0.0.1:45201 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45201 remote=/127.0.0.1:44542]. Total timeout mills is 60000, 59165 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:10:04,728 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_188906940_22 at /127.0.0.1:44400 [Receiving block BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33989:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44400 dst: /127.0.0.1:33989 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:10:04,728 WARN [DataStreamer for file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 block BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45201,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK], DatanodeInfoWithStorage[127.0.0.1:33989,DS-6915a258-9e19-42b4-aae1-48f3a5f0542c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45201,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]) is bad. 
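[Editor's note] The DataXceiver errors and the DataStreamer "Error Recovery" message above are the two ends of the same HDFS write pipeline for block blk_1073741839_1021: the client-side DataStreamer pushes packets, the DataNode-side DataXceiver receives them, and when one DataNode is torn down the client attempts pipeline recovery with whatever replicas remain. The WAL writer drives all of this through an ordinary HDFS output stream. A stripped-down, illustrative equivalent (not HBase's ProtobufLogWriter; the path is a placeholder):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TinyWalWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path wal = new Path("/tmp/tiny-wal/edit.log");   // hypothetical location
        try (FSDataOutputStream out = fs.create(wal, true)) {
          out.write("row1005/info: value".getBytes(StandardCharsets.UTF_8));
          // hflush pushes the packet down the DataStreamer pipeline to every
          // DataNode in the pipeline before returning; this is the call that
          // surfaces "All datanodes ... are bad" when the pipeline collapses.
          out.hflush();
        }
      }
    }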
2024-11-19T01:10:04,729 WARN [DataStreamer for file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 block BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
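[Editor's note] The DataStreamer exception above is a NameNode-side check surfaced to the client as an org.apache.hadoop.ipc.RemoteException: updateBlockForPipeline is rejected because the block is already UNDER_RECOVERY, i.e. the lease recovery kicked off for the earlier roll won the race against the client's own pipeline recovery. Client code that wants to react to the specific server-side exception class rather than the generic wrapper can unwrap it; a small sketch around one such call, with the lease-recovery call standing in for any HDFS RPC:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.ipc.RemoteException;

    public class UnwrapRemote {
      static boolean tryRecover(DistributedFileSystem dfs, Path wal) throws IOException {
        try {
          return dfs.recoverLease(wal);
        } catch (RemoteException re) {
          // unwrapRemoteException() rebuilds the original server-side exception
          // when its class is on the client classpath; otherwise the
          // RemoteException itself comes back and is rethrown unchanged.
          throw re.unwrapRemoteException();
        }
      }
    }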
2024-11-19T01:10:04,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741839_1022 (size=85) 2024-11-19T01:10:04,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741839_1022 (size=85) 2024-11-19T01:10:05,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:05,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:05,727 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-19T01:10:05,877 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978596363 after 4002ms 2024-11-19T01:10:06,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:06,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:07,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:07,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:07,932 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 after 4002ms 2024-11-19T01:10:07,932 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:07,937 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:07,938 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-19T01:10:07,938 ERROR [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:07,938 WARN [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
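[Editor's note] The "Reading WAL ... #5/#6/#7 [...]" debug lines earlier, and the re-read of the newest file just above, are the test walking each rolled WAL and printing its edits until EOF. Outside the test, the same kind of inspection can be done with HBase's WAL reader; the exact reader API differs between branches, so this sketch assumes the 2.x-style WALFactory/WAL.Reader pair (newer branches expose a WALStreamReader instead), and the file path is whatever rolled WAL you point it at:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;

    public class DumpWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path walFile = new Path(args[0]);   // e.g. a file under .../WALs/<server>/
        // Assumed 2.x-style static factory; check your HBase version's API.
        try (WAL.Reader reader = WALFactory.createReader(fs, walFile, conf)) {
          WAL.Entry entry;
          while ((entry = reader.next()) != null) {
            // Each entry pairs a WALKey (region, sequence id, write time) with a
            // WALEdit holding the cells, which is what TestLogRolling prints.
            System.out.println(entry.getKey() + " -> " + entry.getEdit());
          }
        }
      }
    }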
2024-11-19T01:10:07,938 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C41477%2C1731978580633.meta:.meta(num 1731978582133) roll requested 2024-11-19T01:10:07,939 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41477%2C1731978580633.meta.1731978607939.meta 2024-11-19T01:10:07,948 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:07,948 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:07,948 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:07,948 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:07,949 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:07,949 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978607939.meta 2024-11-19T01:10:07,949 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:07,949 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
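[Editor's note] The roll of the meta WAL above is requested internally by the region server's log roller after the failed append. Operators and tests can ask a region server for the same operation through the public client API; a short sketch, where the connection setup is standard and the ServerName values are taken from this log purely as an example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestWalRoll {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask one region server to close its current WAL and open a new one,
          // the same operation the log roller performs here after the failure.
          ServerName rs = ServerName.valueOf("5134ffc85563", 41477, 1731978580633L);
          admin.rollWALWriter(rs);
        }
      }
    }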
2024-11-19T01:10:07,949 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta 2024-11-19T01:10:07,950 WARN [IPC Server handler 4 on default port 43141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-11-19T01:10:07,950 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta after 1ms 2024-11-19T01:10:07,955 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34081:34081),(127.0.0.1/127.0.0.1:39689:39689)] 2024-11-19T01:10:07,955 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta is not closed yet, will try archiving it next time 2024-11-19T01:10:07,977 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/info/d8d9bf71f26940fb9aaafb6100b9026d is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3./info:regioninfo/1731978582770/Put/seqid=0 2024-11-19T01:10:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741841_1025 (size=7125) 2024-11-19T01:10:07,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741841_1025 (size=7125) 2024-11-19T01:10:07,993 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/info/d8d9bf71f26940fb9aaafb6100b9026d 2024-11-19T01:10:08,029 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/ns/035455d1fb6b478f8cfeced896475a71 is 43, key is default/ns:d/1731978582220/Put/seqid=0 2024-11-19T01:10:08,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741842_1026 (size=5153) 2024-11-19T01:10:08,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741842_1026 (size=5153) 2024-11-19T01:10:08,063 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/ns/035455d1fb6b478f8cfeced896475a71 2024-11-19T01:10:08,097 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/table/5630c506a02d48edbb80e38c7a552786 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731978582782/Put/seqid=0 2024-11-19T01:10:08,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741843_1027 (size=5438) 2024-11-19T01:10:08,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741843_1027 (size=5438) 2024-11-19T01:10:08,117 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/table/5630c506a02d48edbb80e38c7a552786 2024-11-19T01:10:08,125 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/info/d8d9bf71f26940fb9aaafb6100b9026d as hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/info/d8d9bf71f26940fb9aaafb6100b9026d 2024-11-19T01:10:08,132 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/info/d8d9bf71f26940fb9aaafb6100b9026d, entries=10, sequenceid=11, filesize=7.0 K 2024-11-19T01:10:08,133 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/ns/035455d1fb6b478f8cfeced896475a71 as hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/ns/035455d1fb6b478f8cfeced896475a71 2024-11-19T01:10:08,140 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/ns/035455d1fb6b478f8cfeced896475a71, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T01:10:08,141 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/.tmp/table/5630c506a02d48edbb80e38c7a552786 as hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/table/5630c506a02d48edbb80e38c7a552786 2024-11-19T01:10:08,156 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/table/5630c506a02d48edbb80e38c7a552786, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T01:10:08,157 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 220ms, sequenceid=11, compaction requested=false 2024-11-19T01:10:08,158 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T01:10:08,158 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 240a07b85f7d48a22908895ae87cf7f3 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-19T01:10:08,158 ERROR [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:08,159 WARN [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d-prefix:5134ffc85563,41477,1731978580633 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T01:10:08,159 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C41477%2C1731978580633:(num 1731978603887) roll requested 2024-11-19T01:10:08,159 INFO [regionserver/5134ffc85563:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41477%2C1731978580633.1731978608159 2024-11-19T01:10:08,170 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 newFile=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978608159 2024-11-19T01:10:08,171 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,171 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,172 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,172 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,172 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,172 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978608159 2024-11-19T01:10:08,172 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:08,173 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1068624987-172.17.0.2-1731978579487:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:08,173 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:08,174 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 after 1ms 2024-11-19T01:10:08,179 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.1731978603887 to hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/oldWALs/5134ffc85563%2C41477%2C1731978580633.1731978603887 2024-11-19T01:10:08,185 DEBUG [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689),(127.0.0.1/127.0.0.1:34081:34081)] 2024-11-19T01:10:08,207 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3/.tmp/info/b39ccb0e49dc42af94cbb50271f1416b is 1080, key is row1002/info:/1731978592462/Put/seqid=0 2024-11-19T01:10:08,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741845_1029 (size=9270) 2024-11-19T01:10:08,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741845_1029 (size=9270) 2024-11-19T01:10:08,217 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3/.tmp/info/b39ccb0e49dc42af94cbb50271f1416b 2024-11-19T01:10:08,224 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3/.tmp/info/b39ccb0e49dc42af94cbb50271f1416b as hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3/info/b39ccb0e49dc42af94cbb50271f1416b 2024-11-19T01:10:08,229 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3/info/b39ccb0e49dc42af94cbb50271f1416b, entries=4, sequenceid=8, filesize=9.1 K 2024-11-19T01:10:08,231 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 240a07b85f7d48a22908895ae87cf7f3 in 72ms, sequenceid=8, compaction requested=false 2024-11-19T01:10:08,231 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 240a07b85f7d48a22908895ae87cf7f3: 2024-11-19T01:10:08,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T01:10:08,237 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T01:10:08,237 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:10:08,237 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:10:08,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:10:08,238 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T01:10:08,238 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T01:10:08,238 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2058373345, stopped=false 2024-11-19T01:10:08,238 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5134ffc85563,36495,1731978580491 2024-11-19T01:10:08,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:10:08,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:10:08,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:08,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:08,240 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:10:08,240 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:10:08,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:10:08,241 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T01:10:08,241 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:10:08,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:10:08,242 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,41477,1731978580633' ***** 2024-11-19T01:10:08,242 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:10:08,242 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:10:08,242 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:10:08,242 INFO [RS:0;5134ffc85563:41477 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:10:08,242 INFO [RS:0;5134ffc85563:41477 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:10:08,242 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(3091): Received CLOSE for 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:10:08,242 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,41477,1731978580633 2024-11-19T01:10:08,242 INFO [RS:0;5134ffc85563:41477 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:10:08,243 INFO [RS:0;5134ffc85563:41477 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5134ffc85563:41477. 2024-11-19T01:10:08,243 DEBUG [RS:0;5134ffc85563:41477 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:10:08,243 DEBUG [RS:0;5134ffc85563:41477 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:10:08,243 INFO [RS:0;5134ffc85563:41477 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T01:10:08,243 INFO [RS:0;5134ffc85563:41477 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:10:08,243 INFO [RS:0;5134ffc85563:41477 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T01:10:08,243 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T01:10:08,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 240a07b85f7d48a22908895ae87cf7f3, disabling compactions & flushes 2024-11-19T01:10:08,243 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:10:08,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:10:08,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. after waiting 0 ms 2024-11-19T01:10:08,243 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:10:08,244 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T01:10:08,244 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 240a07b85f7d48a22908895ae87cf7f3=TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3.} 2024-11-19T01:10:08,244 DEBUG [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 240a07b85f7d48a22908895ae87cf7f3 2024-11-19T01:10:08,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:10:08,244 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:10:08,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:10:08,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:10:08,244 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:10:08,250 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/default/TestLogRolling-testLogRollOnPipelineRestart/240a07b85f7d48a22908895ae87cf7f3/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-19T01:10:08,250 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T01:10:08,251 INFO 
[RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:10:08,251 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:10:08,251 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 240a07b85f7d48a22908895ae87cf7f3: Waiting for close lock at 1731978608243Running coprocessor pre-close hooks at 1731978608243Disabling compacts and flushes for region at 1731978608243Disabling writes for close at 1731978608243Writing region close event to WAL at 1731978608244 (+1 ms)Running coprocessor post-close hooks at 1731978608251 (+7 ms)Closed at 1731978608251 2024-11-19T01:10:08,251 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:10:08,251 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978608244Running coprocessor pre-close hooks at 1731978608244Disabling compacts and flushes for region at 1731978608244Disabling writes for close at 1731978608244Writing region close event to WAL at 1731978608246 (+2 ms)Running coprocessor post-close hooks at 1731978608251 (+5 ms)Closed at 1731978608251 2024-11-19T01:10:08,251 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731978582365.240a07b85f7d48a22908895ae87cf7f3. 2024-11-19T01:10:08,251 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T01:10:08,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:08,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:08,444 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,41477,1731978580633; all regions closed. 
2024-11-19T01:10:08,445 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,445 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,445 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,445 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:08,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741840_1023 (size=825) 2024-11-19T01:10:08,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741840_1023 (size=825) 2024-11-19T01:10:08,659 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T01:10:08,659 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T01:10:08,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741834_1024 (size=2416) 2024-11-19T01:10:09,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:09,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:09,684 INFO [regionserver/5134ffc85563:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:10:09,946 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T01:10:10,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:10,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:10,459 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T01:10:11,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:11,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:11,952 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta after 4003ms 2024-11-19T01:10:11,952 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/WALs/5134ffc85563,41477,1731978580633/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta to hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/oldWALs/5134ffc85563%2C41477%2C1731978580633.meta.1731978582133.meta 2024-11-19T01:10:11,957 DEBUG [RS:0;5134ffc85563:41477 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/oldWALs 2024-11-19T01:10:11,957 INFO [RS:0;5134ffc85563:41477 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C41477%2C1731978580633.meta:.meta(num 1731978607939) 2024-11-19T01:10:11,961 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:11,961 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:11,962 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:11,962 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:11,962 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:11,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741844_1028 (size=1162) 2024-11-19T01:10:11,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741844_1028 (size=1162) 2024-11-19T01:10:11,978 DEBUG [RS:0;5134ffc85563:41477 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/oldWALs 2024-11-19T01:10:11,978 INFO [RS:0;5134ffc85563:41477 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C41477%2C1731978580633:(num 1731978608159) 2024-11-19T01:10:11,978 DEBUG [RS:0;5134ffc85563:41477 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:10:11,978 INFO [RS:0;5134ffc85563:41477 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:10:11,979 INFO [RS:0;5134ffc85563:41477 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:10:11,979 INFO [RS:0;5134ffc85563:41477 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T01:10:11,979 INFO [RS:0;5134ffc85563:41477 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:10:11,979 INFO [RS:0;5134ffc85563:41477 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41477 2024-11-19T01:10:11,979 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:10:11,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,41477,1731978580633 2024-11-19T01:10:11,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:10:11,982 INFO [RS:0;5134ffc85563:41477 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:10:11,983 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,41477,1731978580633] 2024-11-19T01:10:11,985 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,41477,1731978580633 already deleted, retry=false 2024-11-19T01:10:11,985 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,41477,1731978580633 expired; onlineServers=0 2024-11-19T01:10:11,985 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5134ffc85563,36495,1731978580491' ***** 2024-11-19T01:10:11,985 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T01:10:11,986 INFO [M:0;5134ffc85563:36495 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:10:11,986 INFO [M:0;5134ffc85563:36495 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:10:11,986 DEBUG [M:0;5134ffc85563:36495 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T01:10:11,986 DEBUG [M:0;5134ffc85563:36495 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T01:10:11,986 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-19T01:10:11,986 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978581508 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978581508,5,FailOnTimeoutGroup] 2024-11-19T01:10:11,986 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978581506 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978581506,5,FailOnTimeoutGroup] 2024-11-19T01:10:11,987 INFO [M:0;5134ffc85563:36495 {}] hbase.ChoreService(370): Chore service for: master/5134ffc85563:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T01:10:11,987 INFO [M:0;5134ffc85563:36495 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:10:11,987 DEBUG [M:0;5134ffc85563:36495 {}] master.HMaster(1795): Stopping service threads 2024-11-19T01:10:11,987 INFO [M:0;5134ffc85563:36495 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T01:10:11,987 INFO [M:0;5134ffc85563:36495 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:10:11,987 INFO [M:0;5134ffc85563:36495 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T01:10:11,987 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-19T01:10:11,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T01:10:11,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:11,989 DEBUG [M:0;5134ffc85563:36495 {}] zookeeper.ZKUtil(347): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T01:10:11,989 WARN [M:0;5134ffc85563:36495 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T01:10:11,989 INFO [M:0;5134ffc85563:36495 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/.lastflushedseqids 2024-11-19T01:10:12,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741846_1030 (size=139) 2024-11-19T01:10:12,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741846_1030 (size=139) 2024-11-19T01:10:12,025 INFO [M:0;5134ffc85563:36495 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T01:10:12,025 INFO [M:0;5134ffc85563:36495 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T01:10:12,025 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:10:12,025 INFO [M:0;5134ffc85563:36495 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:10:12,025 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:10:12,025 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:10:12,025 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:10:12,025 INFO [M:0;5134ffc85563:36495 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-19T01:10:12,026 ERROR [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData-prefix:5134ffc85563,36495,1731978580491 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:12,026 WARN [FSHLog-0-hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData-prefix:5134ffc85563,36495,1731978580491 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:12,026 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 5134ffc85563%2C36495%2C1731978580491:(num 1731978581331) roll requested 2024-11-19T01:10:12,027 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C36495%2C1731978580491.1731978612026 2024-11-19T01:10:12,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,044 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,045 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978612026 2024-11-19T01:10:12,045 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:12,045 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40001,DS-74f4d546-c1be-4454-8688-ac7cce9b14fa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T01:10:12,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 2024-11-19T01:10:12,046 WARN [IPC Server handler 2 on default port 43141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-19T01:10:12,047 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 after 0ms 2024-11-19T01:10:12,063 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689),(127.0.0.1/127.0.0.1:34081:34081)] 2024-11-19T01:10:12,063 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 is not closed yet, will try archiving it next time 2024-11-19T01:10:12,084 DEBUG [M:0;5134ffc85563:36495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/928e10c5c3b548bea075e8e0868b751f is 82, key is hbase:meta,,1/info:regioninfo/1731978582199/Put/seqid=0 2024-11-19T01:10:12,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:10:12,085 INFO [RS:0;5134ffc85563:41477 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:10:12,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41477-0x101088ac6990001, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:10:12,085 INFO [RS:0;5134ffc85563:41477 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,41477,1731978580633; zookeeper connection closed. 
2024-11-19T01:10:12,086 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4d9cebf1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4d9cebf1 2024-11-19T01:10:12,086 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T01:10:12,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741848_1033 (size=5672) 2024-11-19T01:10:12,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741848_1033 (size=5672) 2024-11-19T01:10:12,096 INFO [M:0;5134ffc85563:36495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/928e10c5c3b548bea075e8e0868b751f 2024-11-19T01:10:12,125 DEBUG [M:0;5134ffc85563:36495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a23d28e7f11e430a9d628550fded75fb is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731978582788/Put/seqid=0 2024-11-19T01:10:12,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741849_1034 (size=6119) 2024-11-19T01:10:12,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741849_1034 (size=6119) 2024-11-19T01:10:12,131 INFO [M:0;5134ffc85563:36495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a23d28e7f11e430a9d628550fded75fb 2024-11-19T01:10:12,174 DEBUG [M:0;5134ffc85563:36495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22934c0f8e9848a7823ca500d8023ca0 is 69, key is 5134ffc85563,41477,1731978580633/rs:state/1731978581565/Put/seqid=0 2024-11-19T01:10:12,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741850_1035 (size=5156) 2024-11-19T01:10:12,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741850_1035 (size=5156) 2024-11-19T01:10:12,185 INFO [M:0;5134ffc85563:36495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22934c0f8e9848a7823ca500d8023ca0 2024-11-19T01:10:12,213 DEBUG [M:0;5134ffc85563:36495 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/175a544749644bf48f0ae90c53a8f0ae is 
52, key is load_balancer_on/state:d/1731978582350/Put/seqid=0 2024-11-19T01:10:12,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741851_1036 (size=5056) 2024-11-19T01:10:12,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741851_1036 (size=5056) 2024-11-19T01:10:12,233 INFO [M:0;5134ffc85563:36495 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/175a544749644bf48f0ae90c53a8f0ae 2024-11-19T01:10:12,245 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/928e10c5c3b548bea075e8e0868b751f as hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/928e10c5c3b548bea075e8e0868b751f 2024-11-19T01:10:12,254 INFO [M:0;5134ffc85563:36495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/928e10c5c3b548bea075e8e0868b751f, entries=8, sequenceid=56, filesize=5.5 K 2024-11-19T01:10:12,255 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a23d28e7f11e430a9d628550fded75fb as hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a23d28e7f11e430a9d628550fded75fb 2024-11-19T01:10:12,266 INFO [M:0;5134ffc85563:36495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a23d28e7f11e430a9d628550fded75fb, entries=6, sequenceid=56, filesize=6.0 K 2024-11-19T01:10:12,267 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22934c0f8e9848a7823ca500d8023ca0 as hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/22934c0f8e9848a7823ca500d8023ca0 2024-11-19T01:10:12,274 INFO [M:0;5134ffc85563:36495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/22934c0f8e9848a7823ca500d8023ca0, entries=1, sequenceid=56, filesize=5.0 K 2024-11-19T01:10:12,275 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/175a544749644bf48f0ae90c53a8f0ae as 
hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/175a544749644bf48f0ae90c53a8f0ae 2024-11-19T01:10:12,284 INFO [M:0;5134ffc85563:36495 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/175a544749644bf48f0ae90c53a8f0ae, entries=1, sequenceid=56, filesize=4.9 K 2024-11-19T01:10:12,286 INFO [M:0;5134ffc85563:36495 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 261ms, sequenceid=56, compaction requested=false 2024-11-19T01:10:12,292 INFO [M:0;5134ffc85563:36495 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:10:12,292 DEBUG [M:0;5134ffc85563:36495 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978612025Disabling compacts and flushes for region at 1731978612025Disabling writes for close at 1731978612025Obtaining lock to block concurrent updates at 1731978612025Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731978612025Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731978612026 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731978612065 (+39 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731978612065Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731978612084 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731978612084Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731978612104 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731978612124 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731978612124Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731978612140 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731978612173 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731978612173Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731978612192 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731978612212 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731978612212Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7094e872: reopening flushed file at 1731978612244 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37ea0b4b: reopening flushed file at 1731978612255 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c1ecde1: reopening flushed file at 1731978612266 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16e80e5d: reopening flushed file at 1731978612274 (+8 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 261ms, sequenceid=56, compaction requested=false at 1731978612286 (+12 ms)Writing region close event to WAL at 1731978612292 (+6 ms)Closed at 1731978612292 2024-11-19T01:10:12,292 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,293 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,293 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:12,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33989 is added to blk_1073741847_1031 (size=757) 2024-11-19T01:10:12,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741847_1031 (size=757) 2024-11-19T01:10:12,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:12,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:12,948 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T01:10:13,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:13,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:13,786 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:10:13,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:13,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:14,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:10:14,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T01:10:14,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T01:10:14,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T01:10:14,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:14,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:15,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:15,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:16,047 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 after 4001ms 2024-11-19T01:10:16,048 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/WALs/5134ffc85563,36495,1731978580491/5134ffc85563%2C36495%2C1731978580491.1731978581331 to hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/oldWALs/5134ffc85563%2C36495%2C1731978580491.1731978581331 2024-11-19T01:10:16,050 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/MasterData/oldWALs/5134ffc85563%2C36495%2C1731978580491.1731978581331 to hdfs://localhost:43141/user/jenkins/test-data/acdbd40a-f0dd-caab-928c-528bbf81a11d/oldWALs/5134ffc85563%2C36495%2C1731978580491.1731978581331$masterlocalwal$ 2024-11-19T01:10:16,051 INFO [M:0;5134ffc85563:36495 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T01:10:16,051 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T01:10:16,051 INFO [M:0;5134ffc85563:36495 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36495 2024-11-19T01:10:16,051 INFO [M:0;5134ffc85563:36495 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:10:16,153 INFO [M:0;5134ffc85563:36495 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:10:16,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:10:16,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36495-0x101088ac6990000, quorum=127.0.0.1:57473, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:10:16,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10aee185{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:16,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39c5f69b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:10:16,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:10:16,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff0f915{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:10:16,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7180ac25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,STOPPED} 2024-11-19T01:10:16,157 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-11-19T01:10:16,157 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T01:10:16,157 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1068624987-172.17.0.2-1731978579487 (Datanode Uuid 54b8e1eb-7bf8-4760-a6c7-b7e0150a1a88) service to localhost/127.0.0.1:43141 2024-11-19T01:10:16,157 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:10:16,158 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data3/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:16,158 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data4/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:16,158 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:10:16,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3719e37e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:16,160 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45c55ac5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:10:16,160 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:10:16,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45604664{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:10:16,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c5aa216{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,STOPPED} 2024-11-19T01:10:16,161 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:10:16,161 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:10:16,161 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:10:16,161 WARN [BP-1068624987-172.17.0.2-1731978579487 heartbeating to localhost/127.0.0.1:43141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1068624987-172.17.0.2-1731978579487 (Datanode Uuid d0d06abc-686e-4ffd-af0f-d2b6f11995bc) service to localhost/127.0.0.1:43141 2024-11-19T01:10:16,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data1/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:16,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/cluster_5bcfa593-45b8-16fe-ae11-8157ba1ac254/data/data2/current/BP-1068624987-172.17.0.2-1731978579487 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:10:16,162 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:10:16,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@fb6244e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:10:16,168 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1915705e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:10:16,168 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:10:16,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a743f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:10:16,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f1af061{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir/,STOPPED} 2024-11-19T01:10:16,174 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T01:10:16,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T01:10:16,200 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43141 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43141 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:43141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=278 (was 274) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3862 (was 4192) 2024-11-19T01:10:16,207 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=278, ProcessCount=11, AvailableMemoryMB=3862 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.log.dir so I do NOT create it in target/test-data/07d92ec8-396c-410e-4fbf-03e549721237 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11f1e4ab-2357-3843-9c9e-cabf4524ecb9/hadoop.tmp.dir so I do NOT create it in target/test-data/07d92ec8-396c-410e-4fbf-03e549721237 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9, deleteOnExit=true 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/test.cache.data in system properties and HBase conf 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir in system properties and HBase conf 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T01:10:16,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T01:10:16,208 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/nfs.dump.dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/java.io.tmpdir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T01:10:16,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T01:10:16,222 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:10:16,288 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:10:16,294 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:10:16,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:10:16,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:10:16,295 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:10:16,296 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:10:16,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15cd018{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:10:16,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a31a089{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:10:16,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:16,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:16,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47df503f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/java.io.tmpdir/jetty-localhost-40783-hadoop-hdfs-3_4_1-tests_jar-_-any-4010566784133592782/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:10:16,411 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f3519da{HTTP/1.1, (http/1.1)}{localhost:40783} 2024-11-19T01:10:16,411 INFO [Time-limited test {}] server.Server(415): Started @187707ms 2024-11-19T01:10:16,424 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:10:16,470 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:10:16,473 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:10:16,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:10:16,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:10:16,474 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:10:16,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25c02940{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:10:16,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5718d675{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:10:16,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1af676f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/java.io.tmpdir/jetty-localhost-34391-hadoop-hdfs-3_4_1-tests_jar-_-any-15123264000142786514/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:16,592 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9cd716d{HTTP/1.1, (http/1.1)}{localhost:34391} 2024-11-19T01:10:16,592 INFO [Time-limited test {}] server.Server(415): Started @187888ms 2024-11-19T01:10:16,593 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:10:16,620 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:10:16,624 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:10:16,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:10:16,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:10:16,624 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:10:16,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6df2cf02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:10:16,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dfe8781{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:10:16,675 WARN [Thread-1636 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data2/current/BP-2072489795-172.17.0.2-1731978616239/current, will proceed with Du for space computation calculation, 2024-11-19T01:10:16,675 WARN [Thread-1635 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data1/current/BP-2072489795-172.17.0.2-1731978616239/current, will proceed with Du for space computation calculation, 2024-11-19T01:10:16,697 WARN [Thread-1614 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:10:16,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f57ff8b14bb5faa with lease ID 0x2d2eff4af54651d8: Processing first storage report for DS-69c8ab18-27cd-445f-97c2-26c1d15a10de from datanode DatanodeRegistration(127.0.0.1:37199, datanodeUuid=b35ddbac-98e3-4b74-bb2c-55961cdc4137, infoPort=40521, infoSecurePort=0, ipcPort=39751, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239) 2024-11-19T01:10:16,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f57ff8b14bb5faa with lease ID 0x2d2eff4af54651d8: from storage DS-69c8ab18-27cd-445f-97c2-26c1d15a10de node DatanodeRegistration(127.0.0.1:37199, datanodeUuid=b35ddbac-98e3-4b74-bb2c-55961cdc4137, infoPort=40521, infoSecurePort=0, ipcPort=39751, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:10:16,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f57ff8b14bb5faa with lease ID 0x2d2eff4af54651d8: Processing first storage report for DS-6fa9bbfe-9398-48c5-bf44-ae2a328846c4 from datanode DatanodeRegistration(127.0.0.1:37199, datanodeUuid=b35ddbac-98e3-4b74-bb2c-55961cdc4137, infoPort=40521, infoSecurePort=0, ipcPort=39751, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239) 2024-11-19T01:10:16,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f57ff8b14bb5faa with lease ID 0x2d2eff4af54651d8: from storage DS-6fa9bbfe-9398-48c5-bf44-ae2a328846c4 node DatanodeRegistration(127.0.0.1:37199, datanodeUuid=b35ddbac-98e3-4b74-bb2c-55961cdc4137, infoPort=40521, infoSecurePort=0, ipcPort=39751, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:10:16,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@191911fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/java.io.tmpdir/jetty-localhost-39759-hadoop-hdfs-3_4_1-tests_jar-_-any-818646148741820866/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:10:16,744 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30d9f702{HTTP/1.1, (http/1.1)}{localhost:39759} 2024-11-19T01:10:16,744 INFO [Time-limited test {}] server.Server(415): Started @188040ms 2024-11-19T01:10:16,746 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T01:10:16,825 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data3/current/BP-2072489795-172.17.0.2-1731978616239/current, will proceed with Du for space computation calculation, 2024-11-19T01:10:16,825 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data4/current/BP-2072489795-172.17.0.2-1731978616239/current, will proceed with Du for space computation calculation, 2024-11-19T01:10:16,841 WARN [Thread-1650 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:10:16,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf55772f4cca83ed with lease ID 0x2d2eff4af54651d9: Processing first storage report for DS-cba89f63-8bfe-4360-b50b-3506ebdff57d from datanode DatanodeRegistration(127.0.0.1:38843, datanodeUuid=8796a9fc-384f-42c7-affc-ce54cd62ab8a, infoPort=38409, infoSecurePort=0, ipcPort=45745, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239) 2024-11-19T01:10:16,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf55772f4cca83ed with lease ID 0x2d2eff4af54651d9: from storage DS-cba89f63-8bfe-4360-b50b-3506ebdff57d node DatanodeRegistration(127.0.0.1:38843, datanodeUuid=8796a9fc-384f-42c7-affc-ce54cd62ab8a, infoPort=38409, infoSecurePort=0, ipcPort=45745, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:10:16,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf55772f4cca83ed with lease ID 0x2d2eff4af54651d9: Processing first storage report for DS-c6ce71eb-edd6-4dbf-ae4e-cd83d99be3f2 from datanode DatanodeRegistration(127.0.0.1:38843, datanodeUuid=8796a9fc-384f-42c7-affc-ce54cd62ab8a, infoPort=38409, infoSecurePort=0, ipcPort=45745, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239) 2024-11-19T01:10:16,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf55772f4cca83ed with lease ID 0x2d2eff4af54651d9: from storage DS-c6ce71eb-edd6-4dbf-ae4e-cd83d99be3f2 node DatanodeRegistration(127.0.0.1:38843, datanodeUuid=8796a9fc-384f-42c7-affc-ce54cd62ab8a, infoPort=38409, infoSecurePort=0, ipcPort=45745, storageInfo=lv=-57;cid=testClusterID;nsid=917565316;c=1731978616239), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:10:16,868 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237 2024-11-19T01:10:16,870 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/zookeeper_0, clientPort=63408, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T01:10:16,871 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63408 2024-11-19T01:10:16,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:16,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:16,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:10:16,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:10:16,883 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533 with version=8 2024-11-19T01:10:16,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase-staging 2024-11-19T01:10:16,884 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:10:16,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:10:16,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:10:16,885 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:10:16,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:10:16,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:10:16,885 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T01:10:16,885 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:10:16,885 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39169 2024-11-19T01:10:16,887 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39169 connecting to ZooKeeper ensemble=127.0.0.1:63408 2024-11-19T01:10:16,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:391690x0, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:10:16,893 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39169-0x101088b54d00000 connected 2024-11-19T01:10:16,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:16,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:16,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:10:16,916 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533, hbase.cluster.distributed=false 2024-11-19T01:10:16,917 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:10:16,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39169 2024-11-19T01:10:16,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39169 2024-11-19T01:10:16,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39169 2024-11-19T01:10:16,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39169 2024-11-19T01:10:16,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39169 2024-11-19T01:10:16,939 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:10:16,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:10:16,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:10:16,939 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:10:16,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:10:16,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:10:16,939 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:10:16,940 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:10:16,940 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39873 2024-11-19T01:10:16,941 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39873 connecting to ZooKeeper ensemble=127.0.0.1:63408 2024-11-19T01:10:16,942 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:16,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:16,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398730x0, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:10:16,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:398730x0, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:10:16,948 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39873-0x101088b54d00001 connected 2024-11-19T01:10:16,948 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:10:16,949 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:10:16,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T01:10:16,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:10:16,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39873 2024-11-19T01:10:16,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39873 2024-11-19T01:10:16,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39873 2024-11-19T01:10:16,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39873 2024-11-19T01:10:16,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39873 2024-11-19T01:10:16,963 
DEBUG [M:0;5134ffc85563:39169 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5134ffc85563:39169 2024-11-19T01:10:16,964 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5134ffc85563,39169,1731978616884 2024-11-19T01:10:16,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:10:16,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:10:16,966 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5134ffc85563,39169,1731978616884 2024-11-19T01:10:16,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:16,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T01:10:16,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:16,969 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:10:16,969 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5134ffc85563,39169,1731978616884 from backup master directory 2024-11-19T01:10:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5134ffc85563,39169,1731978616884 2024-11-19T01:10:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:10:16,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:10:16,970 WARN [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T01:10:16,970 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5134ffc85563,39169,1731978616884 2024-11-19T01:10:16,974 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/hbase.id] with ID: 309585b7-fc0f-4b3b-b87d-4c4507bbb3cd 2024-11-19T01:10:16,974 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/.tmp/hbase.id 2024-11-19T01:10:16,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:10:16,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:10:16,981 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/.tmp/hbase.id]:[hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/hbase.id] 2024-11-19T01:10:16,990 INFO [master/5134ffc85563:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:16,991 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T01:10:16,992 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-19T01:10:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:10:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:10:17,001 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:10:17,001 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T01:10:17,002 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:10:17,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:10:17,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:10:17,011 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store 2024-11-19T01:10:17,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:10:17,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:10:17,020 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:10:17,020 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:10:17,020 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:10:17,020 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:10:17,020 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:10:17,020 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:10:17,020 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:10:17,020 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978617020Disabling compacts and flushes for region at 1731978617020Disabling writes for close at 1731978617020Writing region close event to WAL at 1731978617020Closed at 1731978617020 2024-11-19T01:10:17,021 WARN [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/.initializing 2024-11-19T01:10:17,021 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/WALs/5134ffc85563,39169,1731978616884 2024-11-19T01:10:17,023 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C39169%2C1731978616884, suffix=, logDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/WALs/5134ffc85563,39169,1731978616884, archiveDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/oldWALs, maxLogs=10 2024-11-19T01:10:17,024 INFO [master/5134ffc85563:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C39169%2C1731978616884.1731978617024 2024-11-19T01:10:17,028 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/WALs/5134ffc85563,39169,1731978616884/5134ffc85563%2C39169%2C1731978616884.1731978617024 2024-11-19T01:10:17,029 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38409:38409),(127.0.0.1/127.0.0.1:40521:40521)] 2024-11-19T01:10:17,033 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:10:17,033 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:10:17,033 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,033 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T01:10:17,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T01:10:17,039 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:10:17,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T01:10:17,040 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:10:17,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T01:10:17,042 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:10:17,043 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,043 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,043 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,045 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,045 DEBUG [master/5134ffc85563:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,046 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T01:10:17,047 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:10:17,048 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:10:17,049 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879046, jitterRate=0.11776593327522278}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T01:10:17,050 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731978617033Initializing all the Stores at 1731978617034 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978617034Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978617036 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978617036Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978617036Cleaning up temporary data from old regions at 1731978617045 (+9 ms)Region opened successfully at 1731978617049 (+4 ms) 2024-11-19T01:10:17,050 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T01:10:17,053 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69d0e5e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:10:17,054 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T01:10:17,054 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T01:10:17,054 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T01:10:17,054 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T01:10:17,055 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T01:10:17,055 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T01:10:17,055 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T01:10:17,057 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T01:10:17,057 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T01:10:17,059 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T01:10:17,059 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T01:10:17,060 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T01:10:17,061 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T01:10:17,061 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T01:10:17,062 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T01:10:17,063 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T01:10:17,064 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T01:10:17,065 DEBUG 
[master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T01:10:17,066 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T01:10:17,068 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T01:10:17,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:10:17,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:10:17,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,069 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5134ffc85563,39169,1731978616884, sessionid=0x101088b54d00000, setting cluster-up flag (Was=false) 2024-11-19T01:10:17,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,076 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T01:10:17,077 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,39169,1731978616884 2024-11-19T01:10:17,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,084 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T01:10:17,085 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,39169,1731978616884 2024-11-19T01:10:17,086 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T01:10:17,087 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T01:10:17,088 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T01:10:17,088 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T01:10:17,088 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5134ffc85563,39169,1731978616884 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5134ffc85563:0, corePoolSize=10, maxPoolSize=10 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:10:17,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T01:10:17,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731978647090 2024-11-19T01:10:17,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T01:10:17,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T01:10:17,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T01:10:17,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T01:10:17,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T01:10:17,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T01:10:17,091 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,091 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T01:10:17,091 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T01:10:17,091 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:10:17,091 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T01:10:17,091 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T01:10:17,091 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T01:10:17,091 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T01:10:17,091 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978617091,5,FailOnTimeoutGroup] 2024-11-19T01:10:17,092 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978617091,5,FailOnTimeoutGroup] 2024-11-19T01:10:17,092 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,092 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T01:10:17,092 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,092 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,092 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,092 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T01:10:17,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:10:17,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:10:17,100 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T01:10:17,100 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533 2024-11-19T01:10:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:10:17,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:10:17,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:10:17,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:10:17,108 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:10:17,108 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:10:17,110 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:10:17,110 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:10:17,111 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:10:17,111 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:10:17,113 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:10:17,113 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:10:17,114 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740 2024-11-19T01:10:17,114 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740 2024-11-19T01:10:17,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:10:17,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:10:17,116 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
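The hbase:meta table descriptor dumped above (families info, ns, rep_barrier and table; VERSIONS => 3, BLOOMFILTER => ROWCOL, DATA_BLOCK_ENCODING => ROW_INDEX_V1, IN_MEMORY => true, 8 KB blocks) is written internally by InitMetaProcedure via FSTableDescriptors. For reference only, a comparable column-family layout can be declared through the public HBase client API; the sketch below is illustrative, the table name "demo" and the single "info" family are assumptions for the example, and hbase:meta itself is never created this way by users.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the settings logged for the 'info' family of hbase:meta:
        // 3 versions, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks, in-memory.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .setInMemory(true)
            .build();

        // "demo" is a hypothetical table name used only for this sketch.
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();

        System.out.println(table);
      }
    }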
2024-11-19T01:10:17,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:10:17,118 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:10:17,119 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811141, jitterRate=0.031420305371284485}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:10:17,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731978617106Initializing all the Stores at 1731978617107 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978617107Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978617107Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978617107Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978617107Cleaning up temporary data from old regions at 1731978617115 (+8 ms)Region opened successfully at 1731978617119 (+4 ms) 2024-11-19T01:10:17,120 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:10:17,120 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:10:17,120 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:10:17,120 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:10:17,120 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:10:17,120 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:10:17,120 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978617120Disabling compacts and flushes for region at 1731978617120Disabling writes for close at 1731978617120Writing region close 
event to WAL at 1731978617120Closed at 1731978617120 2024-11-19T01:10:17,122 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:10:17,122 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T01:10:17,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T01:10:17,123 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:10:17,124 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T01:10:17,154 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(746): ClusterId : 309585b7-fc0f-4b3b-b87d-4c4507bbb3cd 2024-11-19T01:10:17,154 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:10:17,156 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:10:17,156 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T01:10:17,158 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:10:17,158 DEBUG [RS:0;5134ffc85563:39873 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43ff329e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:10:17,170 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5134ffc85563:39873 2024-11-19T01:10:17,170 INFO [RS:0;5134ffc85563:39873 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:10:17,170 INFO [RS:0;5134ffc85563:39873 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:10:17,170 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(832): About to register with Master. 
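The two "Opened 1588230740; next sequenceid=2" entries in this log report desiredMaxFileSize=811141 with jitterRate ~ +0.03142 (above) and desiredMaxFileSize=734271 with jitterRate ~ -0.06633 (in the second open of the meta region further down). Both values are consistent with ConstantSizeRegionSplitPolicy adding a jitter term, truncated toward zero, to the same configured base size:

    811141 = 786432 + (long)(786432 *  0.0314203...)   // jitter of +24709 bytes
    734271 = 786432 + (long)(786432 * -0.0663270...)   // jitter of -52161 bytes

The 786432-byte (768 KB) base is inferred from these two samples and is presumably the test's configured hbase.hregion.max.filesize; treat the base value and the exact rounding behaviour as a reading of this log rather than a documented guarantee.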
2024-11-19T01:10:17,171 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,39169,1731978616884 with port=39873, startcode=1731978616939 2024-11-19T01:10:17,171 DEBUG [RS:0;5134ffc85563:39873 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:10:17,173 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45617, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:10:17,173 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39169 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,173 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39169 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,175 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533 2024-11-19T01:10:17,175 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43883 2024-11-19T01:10:17,175 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:10:17,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:10:17,177 DEBUG [RS:0;5134ffc85563:39873 {}] zookeeper.ZKUtil(111): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,177 WARN [RS:0;5134ffc85563:39873 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T01:10:17,177 INFO [RS:0;5134ffc85563:39873 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:10:17,178 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,178 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,39873,1731978616939] 2024-11-19T01:10:17,181 INFO [RS:0;5134ffc85563:39873 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:10:17,183 INFO [RS:0;5134ffc85563:39873 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:10:17,183 INFO [RS:0;5134ffc85563:39873 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:10:17,183 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
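The entries just above (FSHLogProvider selected as the WAL provider, globalMemStoreLimit=880 M, compaction throughput bounded at 100 MB/s and 50 MB/s) map onto ordinary HBase configuration keys. The sketch below shows those keys as I understand them; the 0.4 heap fraction is the usual default rather than a value printed in this log, and the key names should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalAndMemstoreTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // FSHLogProvider corresponds to the "filesystem" WAL provider.
        conf.set("hbase.wal.provider", "filesystem");
        // Fraction of the heap shared by all memstores; the log prints the resulting
        // limit (880 M), not the fraction itself.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Bounds used by PressureAwareCompactionThroughputController (100 MB/s, 50 MB/s above).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      }
    }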
2024-11-19T01:10:17,184 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:10:17,185 INFO [RS:0;5134ffc85563:39873 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:10:17,185 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,185 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,186 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,186 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,186 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:10:17,186 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:10:17,186 DEBUG [RS:0;5134ffc85563:39873 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:10:17,188 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
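Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above comes from a ScheduledChore registered with the server's ChoreService. ScheduledChore is an internal (private-audience) HBase class, so the following is only a sketch of the pattern, assuming the ScheduledChore(name, stopper, period) constructor and ChoreService#scheduleChore signatures as they appear in recent HBase sources; "DemoChore" and its 1000 ms period are made up for the example.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class DemoChore extends ScheduledChore {
      DemoChore(Stoppable stopper) {
        super("DemoChore", stopper, 1000); // name, owner, period in milliseconds
      }

      @Override
      protected void chore() {
        // periodic work, analogous to CompactionChecker / MemstoreFlusherChore above
      }

      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        new ChoreService("demo").scheduleChore(new DemoChore(stopper));
      }
    }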
2024-11-19T01:10:17,188 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,188 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,188 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,188 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,188 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39873,1731978616939-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:10:17,202 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:10:17,203 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39873,1731978616939-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,203 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,203 INFO [RS:0;5134ffc85563:39873 {}] regionserver.Replication(171): 5134ffc85563,39873,1731978616939 started 2024-11-19T01:10:17,217 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,217 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,39873,1731978616939, RpcServer on 5134ffc85563/172.17.0.2:39873, sessionid=0x101088b54d00001 2024-11-19T01:10:17,217 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:10:17,217 DEBUG [RS:0;5134ffc85563:39873 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,217 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,39873,1731978616939' 2024-11-19T01:10:17,217 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:10:17,217 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:10:17,218 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:10:17,218 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:10:17,218 DEBUG [RS:0;5134ffc85563:39873 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,218 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,39873,1731978616939' 2024-11-19T01:10:17,218 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:10:17,218 DEBUG 
[RS:0;5134ffc85563:39873 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:10:17,218 DEBUG [RS:0;5134ffc85563:39873 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:10:17,218 INFO [RS:0;5134ffc85563:39873 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:10:17,218 INFO [RS:0;5134ffc85563:39873 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T01:10:17,275 WARN [5134ffc85563:39169 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T01:10:17,321 INFO [RS:0;5134ffc85563:39873 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C39873%2C1731978616939, suffix=, logDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939, archiveDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/oldWALs, maxLogs=32 2024-11-19T01:10:17,321 INFO [RS:0;5134ffc85563:39873 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C39873%2C1731978616939.1731978617321 2024-11-19T01:10:17,328 INFO [RS:0;5134ffc85563:39873 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978617321 2024-11-19T01:10:17,336 DEBUG [RS:0;5134ffc85563:39873 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40521:40521),(127.0.0.1/127.0.0.1:38409:38409)] 2024-11-19T01:10:17,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:17,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:17,525 DEBUG [5134ffc85563:39169 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T01:10:17,526 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,527 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,39873,1731978616939, state=OPENING 2024-11-19T01:10:17,529 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T01:10:17,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:10:17,531 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:10:17,531 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:10:17,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,39873,1731978616939}] 2024-11-19T01:10:17,531 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:10:17,684 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T01:10:17,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52445, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T01:10:17,690 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T01:10:17,690 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:10:17,692 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C39873%2C1731978616939.meta, suffix=.meta, logDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939, archiveDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/oldWALs, maxLogs=32 2024-11-19T01:10:17,692 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C39873%2C1731978616939.meta.1731978617692.meta 2024-11-19T01:10:17,697 INFO 
[RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.meta.1731978617692.meta 2024-11-19T01:10:17,698 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38409:38409),(127.0.0.1/127.0.0.1:40521:40521)] 2024-11-19T01:10:17,699 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:10:17,699 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T01:10:17,699 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T01:10:17,699 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T01:10:17,700 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T01:10:17,700 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:10:17,700 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T01:10:17,700 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T01:10:17,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:10:17,702 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:10:17,702 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,702 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,702 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:10:17,703 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:10:17,703 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:10:17,704 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:10:17,704 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,705 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:10:17,705 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:10:17,705 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:10:17,706 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:10:17,706 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740 2024-11-19T01:10:17,707 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740 2024-11-19T01:10:17,708 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:10:17,708 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:10:17,709 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
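The CompactionConfiguration lines repeated above for each column family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000) and the FlushLargeStoresPolicy fallback to a 16 MB lower bound correspond, as far as I can tell, to the standard tuning keys below; the mapping is inferred from the log rather than stated in it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        // Floor used by FlushLargeStoresPolicy when the table does not set
        // hbase.hregion.percolumnfamilyflush.size.lower.bound (the 16 MB printed above).
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound.min", 16L * 1024 * 1024);
      }
    }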
2024-11-19T01:10:17,710 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:10:17,711 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734271, jitterRate=-0.06632702052593231}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:10:17,711 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T01:10:17,711 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731978617700Writing region info on filesystem at 1731978617700Initializing all the Stores at 1731978617701 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978617701Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978617701Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978617701Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978617701Cleaning up temporary data from old regions at 1731978617708 (+7 ms)Running coprocessor post-open hooks at 1731978617711 (+3 ms)Region opened successfully at 1731978617711 2024-11-19T01:10:17,712 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731978617684 2024-11-19T01:10:17,715 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T01:10:17,715 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T01:10:17,715 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,716 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,39873,1731978616939, state=OPEN 2024-11-19T01:10:17,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:10:17,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:10:17,721 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,721 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:10:17,721 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:10:17,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T01:10:17,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,39873,1731978616939 in 190 msec 2024-11-19T01:10:17,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T01:10:17,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 602 msec 2024-11-19T01:10:17,728 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:10:17,728 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T01:10:17,730 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:10:17,730 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,39873,1731978616939, seqNum=-1] 2024-11-19T01:10:17,730 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:10:17,732 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51249, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:10:17,738 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 650 msec 2024-11-19T01:10:17,738 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731978617738, completionTime=-1 2024-11-19T01:10:17,739 INFO 
[master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T01:10:17,739 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T01:10:17,741 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T01:10:17,741 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731978677741 2024-11-19T01:10:17,741 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731978737741 2024-11-19T01:10:17,741 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T01:10:17,742 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39169,1731978616884-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,742 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39169,1731978616884-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,742 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39169,1731978616884-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,742 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5134ffc85563:39169, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,742 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,743 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,744 DEBUG [master/5134ffc85563:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T01:10:17,747 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.777sec 2024-11-19T01:10:17,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T01:10:17,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T01:10:17,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T01:10:17,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-19T01:10:17,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T01:10:17,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39169,1731978616884-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:10:17,748 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39169,1731978616884-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T01:10:17,750 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T01:10:17,750 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T01:10:17,750 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,39169,1731978616884-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:17,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65c292b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:10:17,754 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5134ffc85563,39169,-1 for getting cluster id 2024-11-19T01:10:17,754 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T01:10:17,756 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '309585b7-fc0f-4b3b-b87d-4c4507bbb3cd' 2024-11-19T01:10:17,756 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T01:10:17,756 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "309585b7-fc0f-4b3b-b87d-4c4507bbb3cd" 2024-11-19T01:10:17,756 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71e2e835, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:10:17,756 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5134ffc85563,39169,-1] 2024-11-19T01:10:17,757 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T01:10:17,757 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:10:17,758 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33090, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T01:10:17,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34fe4f6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:10:17,759 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:10:17,760 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,39873,1731978616939, seqNum=-1] 2024-11-19T01:10:17,760 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:10:17,761 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:10:17,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5134ffc85563,39169,1731978616884 2024-11-19T01:10:17,763 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:10:17,765 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T01:10:17,765 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T01:10:17,766 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5134ffc85563,39169,1731978616884 2024-11-19T01:10:17,766 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42b0320 2024-11-19T01:10:17,766 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T01:10:17,767 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33100, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T01:10:17,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T01:10:17,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T01:10:17,768 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:10:17,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:10:17,770 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T01:10:17,771 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:17,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-19T01:10:17,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:10:17,772 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T01:10:17,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741835_1011 (size=405) 2024-11-19T01:10:17,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741835_1011 (size=405) 2024-11-19T01:10:17,780 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 724c36d36a235dab91c44037e24ee053, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533 2024-11-19T01:10:17,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741836_1012 (size=88) 2024-11-19T01:10:17,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37199 is added to blk_1073741836_1012 (size=88) 2024-11-19T01:10:17,787 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:10:17,787 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 724c36d36a235dab91c44037e24ee053, disabling compactions & flushes 2024-11-19T01:10:17,787 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:17,787 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:17,787 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. after waiting 0 ms 2024-11-19T01:10:17,787 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:17,787 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:17,787 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 724c36d36a235dab91c44037e24ee053: Waiting for close lock at 1731978617787Disabling compacts and flushes for region at 1731978617787Disabling writes for close at 1731978617787Writing region close event to WAL at 1731978617787Closed at 1731978617787 2024-11-19T01:10:17,788 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T01:10:17,789 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731978617788"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731978617788"}]},"ts":"1731978617788"} 2024-11-19T01:10:17,791 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T01:10:17,792 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T01:10:17,792 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978617792"}]},"ts":"1731978617792"} 2024-11-19T01:10:17,794 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-19T01:10:17,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=724c36d36a235dab91c44037e24ee053, ASSIGN}] 2024-11-19T01:10:17,796 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=724c36d36a235dab91c44037e24ee053, ASSIGN 2024-11-19T01:10:17,797 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=724c36d36a235dab91c44037e24ee053, ASSIGN; state=OFFLINE, location=5134ffc85563,39873,1731978616939; forceNewPlan=false, retain=false 2024-11-19T01:10:17,947 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=724c36d36a235dab91c44037e24ee053, regionState=OPENING, regionLocation=5134ffc85563,39873,1731978616939 2024-11-19T01:10:17,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=724c36d36a235dab91c44037e24ee053, ASSIGN because future has completed 2024-11-19T01:10:17,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 724c36d36a235dab91c44037e24ee053, server=5134ffc85563,39873,1731978616939}] 2024-11-19T01:10:18,108 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 
2024-11-19T01:10:18,108 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 724c36d36a235dab91c44037e24ee053, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:10:18,109 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,109 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:10:18,109 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,109 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,110 INFO [StoreOpener-724c36d36a235dab91c44037e24ee053-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,111 INFO [StoreOpener-724c36d36a235dab91c44037e24ee053-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 724c36d36a235dab91c44037e24ee053 columnFamilyName info 2024-11-19T01:10:18,112 DEBUG [StoreOpener-724c36d36a235dab91c44037e24ee053-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:10:18,112 INFO [StoreOpener-724c36d36a235dab91c44037e24ee053-1 {}] regionserver.HStore(327): Store=724c36d36a235dab91c44037e24ee053/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:10:18,112 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,113 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,113 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,113 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,113 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,115 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,117 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:10:18,117 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 724c36d36a235dab91c44037e24ee053; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733462, jitterRate=-0.06735557317733765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T01:10:18,117 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:10:18,118 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 724c36d36a235dab91c44037e24ee053: Running coprocessor pre-open hook at 1731978618109Writing region info on filesystem at 1731978618109Initializing all the Stores at 1731978618110 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978618110Cleaning up temporary data from old regions at 1731978618113 (+3 ms)Running coprocessor post-open hooks at 1731978618117 (+4 ms)Region opened successfully at 1731978618118 (+1 ms) 2024-11-19T01:10:18,119 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053., pid=6, masterSystemTime=1731978618104 2024-11-19T01:10:18,121 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:18,121 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:18,122 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=724c36d36a235dab91c44037e24ee053, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,39873,1731978616939 2024-11-19T01:10:18,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 724c36d36a235dab91c44037e24ee053, server=5134ffc85563,39873,1731978616939 because future has completed 2024-11-19T01:10:18,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T01:10:18,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 724c36d36a235dab91c44037e24ee053, server=5134ffc85563,39873,1731978616939 in 176 msec 2024-11-19T01:10:18,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T01:10:18,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=724c36d36a235dab91c44037e24ee053, ASSIGN in 334 msec 2024-11-19T01:10:18,131 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T01:10:18,132 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978618131"}]},"ts":"1731978618131"} 2024-11-19T01:10:18,133 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-19T01:10:18,134 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T01:10:18,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 366 msec 2024-11-19T01:10:18,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:18,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:19,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:19,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:19,821 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:10:19,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,845 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:19,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:10:20,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:20,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:21,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:21,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:22,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:22,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:23,181 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T01:10:23,182 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-19T01:10:23,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:23,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:24,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T01:10:24,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T01:10:24,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:10:24,320 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T01:10:24,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T01:10:24,320 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T01:10:24,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:10:24,320 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T01:10:24,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:24,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:25,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:25,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:26,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:26,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:27,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:27,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:27,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:10:27,815 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T01:10:27,815 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-19T01:10:27,819 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:10:27,819 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:27,822 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053., hostname=5134ffc85563,39873,1731978616939, seqNum=2] 2024-11-19T01:10:27,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:10:27,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:10:27,836 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T01:10:27,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T01:10:27,838 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T01:10:27,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T01:10:28,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39873 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-19T01:10:28,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 
2024-11-19T01:10:28,003 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 724c36d36a235dab91c44037e24ee053 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T01:10:28,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/976c5d1364cc46cdbc1b1d19ff3496ea is 1080, key is row0001/info:/1731978627823/Put/seqid=0 2024-11-19T01:10:28,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741837_1013 (size=6033) 2024-11-19T01:10:28,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741837_1013 (size=6033) 2024-11-19T01:10:28,062 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/976c5d1364cc46cdbc1b1d19ff3496ea 2024-11-19T01:10:28,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/976c5d1364cc46cdbc1b1d19ff3496ea as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/976c5d1364cc46cdbc1b1d19ff3496ea 2024-11-19T01:10:28,097 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/976c5d1364cc46cdbc1b1d19ff3496ea, entries=1, sequenceid=5, filesize=5.9 K 2024-11-19T01:10:28,099 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 724c36d36a235dab91c44037e24ee053 in 96ms, sequenceid=5, compaction requested=false 2024-11-19T01:10:28,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 724c36d36a235dab91c44037e24ee053: 2024-11-19T01:10:28,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 
2024-11-19T01:10:28,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-19T01:10:28,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-19T01:10:28,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T01:10:28,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 267 msec 2024-11-19T01:10:28,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 280 msec 2024-11-19T01:10:28,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:28,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:29,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:29,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:30,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:30,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:31,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:31,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:32,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:32,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:33,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:33,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:34,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:34,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:35,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:35,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:36,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:36,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:37,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:37,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T01:10:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-19T01:10:37,865 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T01:10:37,870 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T01:10:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T01:10:37,873 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-19T01:10:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-19T01:10:37,874 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T01:10:37,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T01:10:38,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39873 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-19T01:10:38,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.
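The records at 01:10:37,870 through 01:10:38,029 show a client-requested flush being turned into a FlushTableProcedure (pid=9) that fans out a FlushRegionProcedure subprocedure (pid=10) to the region server. Below is a minimal client-side sketch of issuing such a flush; the configuration and connection setup are assumptions for illustration, and only Admin.flush(TableName) reflects the operation visible in the log.

```java
// Hedged sketch: issuing a table flush like the one logged above.
// Connection/configuration setup is illustrative; the table name is taken from the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master stores a FlushTableProcedure (pid=9 in the log), which
      // spawns one FlushRegionProcedure per region (pid=10 in the log).
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```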
2024-11-19T01:10:38,029 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 724c36d36a235dab91c44037e24ee053 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-19T01:10:38,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/f15d072343a14700aa524a5742fe5714 is 1080, key is row0002/info:/1731978637868/Put/seqid=0
2024-11-19T01:10:38,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741838_1014 (size=6033)
2024-11-19T01:10:38,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741838_1014 (size=6033)
2024-11-19T01:10:38,041 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/f15d072343a14700aa524a5742fe5714
2024-11-19T01:10:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/f15d072343a14700aa524a5742fe5714 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/f15d072343a14700aa524a5742fe5714
2024-11-19T01:10:38,054 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/f15d072343a14700aa524a5742fe5714, entries=1, sequenceid=9, filesize=5.9 K
2024-11-19T01:10:38,056 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 724c36d36a235dab91c44037e24ee053 in 27ms, sequenceid=9, compaction requested=false
2024-11-19T01:10:38,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 724c36d36a235dab91c44037e24ee053:
2024-11-19T01:10:38,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.
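The recurring Close-WAL-Writer-0 warnings that bracket this flush activity show RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke, so the IOException("Filesystem closed") from the already shut-down DFSClient surfaces wrapped in an InvocationTargetException, exactly as logged. The following is a minimal sketch of that reflective probe pattern, not the actual HBase implementation; the class and helper names are illustrative.

```java
// Hedged sketch of a reflective isFileClosed probe (simplified illustration only).
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  /** Returns true if the file is known closed; false if the probe is unavailable or fails. */
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); a generic FileSystem may not.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // no such probe on this filesystem implementation
    } catch (InvocationTargetException e) {
      // The underlying IOException ("Filesystem closed" in the log) is the cause;
      // the caller would log a warning and retry, as seen in the WARN records here.
      if (e.getCause() instanceof IOException) {
        return false;
      }
      throw new RuntimeException(e.getCause());
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
}
```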
2024-11-19T01:10:38,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-19T01:10:38,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-19T01:10:38,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-19T01:10:38,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec
2024-11-19T01:10:38,065 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec
2024-11-19T01:10:38,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-19T01:10:38,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:39,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:39,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:40,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:40,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 after 68057ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:10:40,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:40,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta after 68042ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T01:10:41,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:41,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:42,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:42,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:43,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:43,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:44,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:44,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:45,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:45,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:46,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T01:10:46,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-19T01:10:46,867 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-19T01:10:47,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-19T01:10:47,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-19T01:10:47,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-19T01:10:47,905 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T01:10:47,908 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C39873%2C1731978616939.1731978647908
2024-11-19T01:10:47,913 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:10:47,913 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:10:47,913 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:10:47,913 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:10:47,914 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:10:47,914 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978617321 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978647908
2024-11-19T01:10:47,915 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38409:38409),(127.0.0.1/127.0.0.1:40521:40521)]
2024-11-19T01:10:47,915 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978617321 is not closed yet, will try archiving it next time
2024-11-19T01:10:47,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741833_1009 (size=5546)
2024-11-19T01:10:47,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T01:10:47,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741833_1009 (size=5546)
2024-11-19T01:10:47,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T01:10:47,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-19T01:10:47,918 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-19T01:10:47,919 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T01:10:47,919 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T01:10:48,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39873 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-19T01:10:48,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.
2024-11-19T01:10:48,073 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 724c36d36a235dab91c44037e24ee053 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-19T01:10:48,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/9e3eafcf71a74285ba43d13764d159ad is 1080, key is row0003/info:/1731978647906/Put/seqid=0
2024-11-19T01:10:48,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741840_1016 (size=6033)
2024-11-19T01:10:48,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741840_1016 (size=6033)
2024-11-19T01:10:48,083 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/9e3eafcf71a74285ba43d13764d159ad
2024-11-19T01:10:48,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/9e3eafcf71a74285ba43d13764d159ad as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/9e3eafcf71a74285ba43d13764d159ad
2024-11-19T01:10:48,094 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/9e3eafcf71a74285ba43d13764d159ad, entries=1, sequenceid=13, filesize=5.9 K
2024-11-19T01:10:48,095 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 724c36d36a235dab91c44037e24ee053 in 22ms, sequenceid=13, compaction requested=true
2024-11-19T01:10:48,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 724c36d36a235dab91c44037e24ee053:
2024-11-19T01:10:48,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.
2024-11-19T01:10:48,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-19T01:10:48,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-19T01:10:48,100 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-19T01:10:48,100 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-11-19T01:10:48,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec
2024-11-19T01:10:48,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-19T01:10:48,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-19T01:10:49,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:49,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:50,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:50,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:51,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:51,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:52,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:52,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:53,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:53,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:54,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:54,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:55,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:55,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:56,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:56,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:57,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:57,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-19T01:10:57,995 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T01:10:57,995 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:10:57,996 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:10:57,997 DEBUG [Time-limited test {}] regionserver.HStore(1541): 724c36d36a235dab91c44037e24ee053/info is initiating minor compaction (all files) 2024-11-19T01:10:57,997 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:10:57,997 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:10:57,997 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 724c36d36a235dab91c44037e24ee053/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:10:57,997 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/976c5d1364cc46cdbc1b1d19ff3496ea, hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/f15d072343a14700aa524a5742fe5714, hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/9e3eafcf71a74285ba43d13764d159ad] into tmpdir=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp, totalSize=17.7 K 2024-11-19T01:10:57,998 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 976c5d1364cc46cdbc1b1d19ff3496ea, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731978627823 2024-11-19T01:10:57,998 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f15d072343a14700aa524a5742fe5714, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731978637868 2024-11-19T01:10:57,999 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9e3eafcf71a74285ba43d13764d159ad, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731978647906 2024-11-19T01:10:58,015 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 724c36d36a235dab91c44037e24ee053#info#compaction#45 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:10:58,015 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/ff5b63c3c1634f04aa572b2487ea70c2 is 1080, key is row0001/info:/1731978627823/Put/seqid=0 2024-11-19T01:10:58,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741841_1017 (size=8296) 2024-11-19T01:10:58,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741841_1017 (size=8296) 2024-11-19T01:10:58,034 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/ff5b63c3c1634f04aa572b2487ea70c2 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/ff5b63c3c1634f04aa572b2487ea70c2 2024-11-19T01:10:58,042 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 724c36d36a235dab91c44037e24ee053/info of 724c36d36a235dab91c44037e24ee053 into ff5b63c3c1634f04aa572b2487ea70c2(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:10:58,042 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 724c36d36a235dab91c44037e24ee053: 2024-11-19T01:10:58,045 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C39873%2C1731978616939.1731978658044 2024-11-19T01:10:58,056 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:58,056 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:58,056 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:58,056 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:58,056 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:10:58,057 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978647908 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978658044 2024-11-19T01:10:58,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741839_1015 (size=2520) 2024-11-19T01:10:58,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741839_1015 (size=2520) 2024-11-19T01:10:58,066 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38409:38409),(127.0.0.1/127.0.0.1:40521:40521)] 2024-11-19T01:10:58,067 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978617321 to hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/oldWALs/5134ffc85563%2C39873%2C1731978616939.1731978617321 2024-11-19T01:10:58,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:10:58,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:10:58,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T01:10:58,070 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T01:10:58,071 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T01:10:58,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T01:10:58,111 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T01:10:58,111 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T01:10:58,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39873 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-19T01:10:58,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 
2024-11-19T01:10:58,226 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 724c36d36a235dab91c44037e24ee053 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T01:10:58,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/3c0c5ffdf7114841ba0d503451c01d33 is 1080, key is row0000/info:/1731978658043/Put/seqid=0 2024-11-19T01:10:58,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741843_1019 (size=6033) 2024-11-19T01:10:58,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741843_1019 (size=6033) 2024-11-19T01:10:58,245 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/3c0c5ffdf7114841ba0d503451c01d33 2024-11-19T01:10:58,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/3c0c5ffdf7114841ba0d503451c01d33 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/3c0c5ffdf7114841ba0d503451c01d33 2024-11-19T01:10:58,268 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/3c0c5ffdf7114841ba0d503451c01d33, entries=1, sequenceid=18, filesize=5.9 K 2024-11-19T01:10:58,269 INFO [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 724c36d36a235dab91c44037e24ee053 in 43ms, sequenceid=18, compaction requested=false 2024-11-19T01:10:58,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 724c36d36a235dab91c44037e24ee053: 2024-11-19T01:10:58,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 
2024-11-19T01:10:58,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-19T01:10:58,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-19T01:10:58,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-19T01:10:58,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-19T01:10:58,279 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 209 msec 2024-11-19T01:10:58,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:10:58,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:59,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:10:59,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:00,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:00,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:01,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:01,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:02,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:02,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:03,109 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 724c36d36a235dab91c44037e24ee053, had cached 0 bytes from a total of 14329 2024-11-19T01:11:03,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:03,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:04,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:04,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:05,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:05,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:06,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:06,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:07,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:07,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T01:11:08,146 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T01:11:08,149 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C39873%2C1731978616939.1731978668149 2024-11-19T01:11:08,156 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,156 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,156 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,156 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,157 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,157 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978658044 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978668149 2024-11-19T01:11:08,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741842_1018 (size=2026) 2024-11-19T01:11:08,160 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/WALs/5134ffc85563,39873,1731978616939/5134ffc85563%2C39873%2C1731978616939.1731978647908 to hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/oldWALs/5134ffc85563%2C39873%2C1731978616939.1731978647908 2024-11-19T01:11:08,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741842_1018 (size=2026) 2024-11-19T01:11:08,161 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40521:40521),(127.0.0.1/127.0.0.1:38409:38409)] 2024-11-19T01:11:08,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T01:11:08,162 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
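The repeated "Failed invocation" warnings above come from the WAL close path probing whether the old writer's file has been closed on HDFS; because the mini-cluster's DFSClient has already been shut down, every probe throws IOException("Filesystem closed") wrapped in an InvocationTargetException, and the caller logs a WARN and retries roughly once per second. A minimal sketch of that reflective polling pattern follows; the class and helper names are hypothetical stand-ins for illustration, not the actual RecoverLeaseFSUtils implementation.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hypothetical sketch of a reflective "is the file closed yet?" poll, mirroring the
// cadence in the warnings above: one attempt about every second, each failure logged.
public final class LeaseProbeSketch {

  interface WarnLogger { void warn(String msg, Throwable t); }

  static boolean pollUntilClosed(Object fileSystem, Method isFileClosed, Object path,
                                 long timeoutMs, WarnLogger log) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        // Reflective invocation, as in the Method.invoke frames of the stack traces above.
        if (Boolean.TRUE.equals(isFileClosed.invoke(fileSystem, path))) {
          return true;
        }
      } catch (InvocationTargetException e) {
        // In the log the root cause is java.io.IOException: Filesystem closed;
        // the probe only warns and tries again on the next cycle.
        log.warn("Failed invocation for " + path, e);
      } catch (IllegalAccessException e) {
        log.warn("Reflection not accessible, giving up on " + path, e);
        return false;
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}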
2024-11-19T01:11:08,162 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:11:08,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:08,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:08,162 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T01:11:08,162 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1107624045, stopped=false 2024-11-19T01:11:08,162 
INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5134ffc85563,39169,1731978616884 2024-11-19T01:11:08,162 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T01:11:08,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:11:08,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:11:08,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:08,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:08,164 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:11:08,164 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T01:11:08,164 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:11:08,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:08,165 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,39873,1731978616939' ***** 2024-11-19T01:11:08,165 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:11:08,165 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:11:08,165 INFO [RS:0;5134ffc85563:39873 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:11:08,165 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:11:08,165 INFO [RS:0;5134ffc85563:39873 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:11:08,165 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:08,165 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(3091): Received CLOSE for 724c36d36a235dab91c44037e24ee053 2024-11-19T01:11:08,166 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:08,173 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,39873,1731978616939 2024-11-19T01:11:08,173 INFO [RS:0;5134ffc85563:39873 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:11:08,173 INFO [RS:0;5134ffc85563:39873 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5134ffc85563:39873. 
2024-11-19T01:11:08,173 DEBUG [RS:0;5134ffc85563:39873 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:11:08,173 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 724c36d36a235dab91c44037e24ee053, disabling compactions & flushes 2024-11-19T01:11:08,173 DEBUG [RS:0;5134ffc85563:39873 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:08,173 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:11:08,173 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:11:08,174 INFO [RS:0;5134ffc85563:39873 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T01:11:08,174 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. after waiting 0 ms 2024-11-19T01:11:08,174 INFO [RS:0;5134ffc85563:39873 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:11:08,174 INFO [RS:0;5134ffc85563:39873 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T01:11:08,174 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 
2024-11-19T01:11:08,174 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T01:11:08,174 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 724c36d36a235dab91c44037e24ee053 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T01:11:08,174 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T01:11:08,174 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 724c36d36a235dab91c44037e24ee053=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.} 2024-11-19T01:11:08,174 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:11:08,174 DEBUG [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 724c36d36a235dab91c44037e24ee053 2024-11-19T01:11:08,174 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:11:08,174 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:11:08,174 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:11:08,174 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:11:08,174 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-19T01:11:08,184 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/c789c356338b41f59c85cb93c126c4a9 is 1080, key is row0001/info:/1731978668147/Put/seqid=0 2024-11-19T01:11:08,189 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T01:11:08,189 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T01:11:08,197 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/info/0f732eebdacf46d1a148454d6c15b68a is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053./info:regioninfo/1731978618122/Put/seqid=0 2024-11-19T01:11:08,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741845_1021 (size=6033) 2024-11-19T01:11:08,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is 
added to blk_1073741845_1021 (size=6033) 2024-11-19T01:11:08,201 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/c789c356338b41f59c85cb93c126c4a9 2024-11-19T01:11:08,215 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/.tmp/info/c789c356338b41f59c85cb93c126c4a9 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/c789c356338b41f59c85cb93c126c4a9 2024-11-19T01:11:08,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741846_1022 (size=7308) 2024-11-19T01:11:08,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741846_1022 (size=7308) 2024-11-19T01:11:08,222 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/info/0f732eebdacf46d1a148454d6c15b68a 2024-11-19T01:11:08,229 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/c789c356338b41f59c85cb93c126c4a9, entries=1, sequenceid=22, filesize=5.9 K 2024-11-19T01:11:08,230 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 724c36d36a235dab91c44037e24ee053 in 56ms, sequenceid=22, compaction requested=true 2024-11-19T01:11:08,238 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/976c5d1364cc46cdbc1b1d19ff3496ea, hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/f15d072343a14700aa524a5742fe5714, hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/9e3eafcf71a74285ba43d13764d159ad] to archive 2024-11-19T01:11:08,239 DEBUG 
[StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T01:11:08,242 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/976c5d1364cc46cdbc1b1d19ff3496ea to hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/976c5d1364cc46cdbc1b1d19ff3496ea 2024-11-19T01:11:08,243 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/f15d072343a14700aa524a5742fe5714 to hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/f15d072343a14700aa524a5742fe5714 2024-11-19T01:11:08,249 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/9e3eafcf71a74285ba43d13764d159ad to hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/info/9e3eafcf71a74285ba43d13764d159ad 2024-11-19T01:11:08,250 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5134ffc85563:39169 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T01:11:08,250 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [976c5d1364cc46cdbc1b1d19ff3496ea=6033, f15d072343a14700aa524a5742fe5714=6033, 9e3eafcf71a74285ba43d13764d159ad=6033] 2024-11-19T01:11:08,265 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/ns/9019e82bd0ef4665acdb270a079a52c4 is 43, key is default/ns:d/1731978617732/Put/seqid=0 2024-11-19T01:11:08,270 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/724c36d36a235dab91c44037e24ee053/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-19T01:11:08,273 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 2024-11-19T01:11:08,273 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 724c36d36a235dab91c44037e24ee053: Waiting for close lock at 1731978668173Running coprocessor pre-close hooks at 1731978668173Disabling compacts and flushes for region at 1731978668173Disabling writes for close at 1731978668174 (+1 ms)Obtaining lock to block concurrent updates at 1731978668174Preparing flush snapshotting stores in 724c36d36a235dab91c44037e24ee053 at 1731978668174Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731978668174Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. at 1731978668175 (+1 ms)Flushing 724c36d36a235dab91c44037e24ee053/info: creating writer at 1731978668175Flushing 724c36d36a235dab91c44037e24ee053/info: appending metadata at 1731978668183 (+8 ms)Flushing 724c36d36a235dab91c44037e24ee053/info: closing flushed file at 1731978668183Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d5ef5b3: reopening flushed file at 1731978668212 (+29 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 724c36d36a235dab91c44037e24ee053 in 56ms, sequenceid=22, compaction requested=true at 1731978668230 (+18 ms)Writing region close event to WAL at 1731978668262 (+32 ms)Running coprocessor post-close hooks at 1731978668273 (+11 ms)Closed at 1731978668273 2024-11-19T01:11:08,273 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731978617768.724c36d36a235dab91c44037e24ee053. 
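The region close recorded above follows a write-then-commit pattern: the flushed store file is first written under the region's .tmp directory, then committed into the info store with a rename, and the older compacted files are finally moved to the archive. The sketch below illustrates that tmp-then-rename idea using local java.nio.file calls purely as an analogy; HBase performs the equivalent renames on HDFS, and all names here are illustrative rather than taken from the HBase code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Analogy only: write the flushed file under .tmp, then expose it with a single move.
final class TmpCommitSketch {
  static Path flushAndCommit(Path regionDir, String storeFileName, byte[] payload) throws IOException {
    Path tmpDir = regionDir.resolve(".tmp");
    Files.createDirectories(tmpDir);
    // Flush output stays invisible to readers while it lives under .tmp.
    Path tmpFile = Files.write(tmpDir.resolve(storeFileName), payload);
    Path storeDir = regionDir.resolve("info");
    Files.createDirectories(storeDir);
    // Commit: one atomic move makes the finished store file visible; readers never see partial data.
    return Files.move(tmpFile, storeDir.resolve(storeFileName), StandardCopyOption.ATOMIC_MOVE);
  }
}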
2024-11-19T01:11:08,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741847_1023 (size=5153) 2024-11-19T01:11:08,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741847_1023 (size=5153) 2024-11-19T01:11:08,288 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/ns/9019e82bd0ef4665acdb270a079a52c4 2024-11-19T01:11:08,320 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/table/6628e5aaaf8c4786b4b1554fa8520db8 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731978618131/Put/seqid=0 2024-11-19T01:11:08,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741848_1024 (size=5508) 2024-11-19T01:11:08,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741848_1024 (size=5508) 2024-11-19T01:11:08,329 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/table/6628e5aaaf8c4786b4b1554fa8520db8 2024-11-19T01:11:08,336 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/info/0f732eebdacf46d1a148454d6c15b68a as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/info/0f732eebdacf46d1a148454d6c15b68a 2024-11-19T01:11:08,342 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/info/0f732eebdacf46d1a148454d6c15b68a, entries=10, sequenceid=11, filesize=7.1 K 2024-11-19T01:11:08,343 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/ns/9019e82bd0ef4665acdb270a079a52c4 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/ns/9019e82bd0ef4665acdb270a079a52c4 2024-11-19T01:11:08,352 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/ns/9019e82bd0ef4665acdb270a079a52c4, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T01:11:08,353 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/.tmp/table/6628e5aaaf8c4786b4b1554fa8520db8 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/table/6628e5aaaf8c4786b4b1554fa8520db8 2024-11-19T01:11:08,359 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/table/6628e5aaaf8c4786b4b1554fa8520db8, entries=2, sequenceid=11, filesize=5.4 K 2024-11-19T01:11:08,360 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 186ms, sequenceid=11, compaction requested=false 2024-11-19T01:11:08,365 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T01:11:08,365 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:11:08,365 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:11:08,365 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978668174Running coprocessor pre-close hooks at 1731978668174Disabling compacts and flushes for region at 1731978668174Disabling writes for close at 1731978668174Obtaining lock to block concurrent updates at 1731978668174Preparing flush snapshotting stores in 1588230740 at 1731978668174Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731978668175 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731978668175Flushing 1588230740/info: creating writer at 1731978668176 (+1 ms)Flushing 1588230740/info: appending metadata at 1731978668197 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731978668197Flushing 1588230740/ns: creating writer at 1731978668236 (+39 ms)Flushing 1588230740/ns: appending metadata at 1731978668264 (+28 ms)Flushing 1588230740/ns: closing flushed file at 1731978668264Flushing 1588230740/table: creating writer at 1731978668300 (+36 ms)Flushing 1588230740/table: appending metadata at 1731978668319 (+19 ms)Flushing 1588230740/table: closing flushed file at 1731978668319Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@248fb67: reopening flushed file at 1731978668334 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3623900f: reopening flushed file at 1731978668342 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36fa0a49: reopening flushed file at 1731978668352 (+10 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 186ms, sequenceid=11, compaction 
requested=false at 1731978668360 (+8 ms)Writing region close event to WAL at 1731978668361 (+1 ms)Running coprocessor post-close hooks at 1731978668365 (+4 ms)Closed at 1731978668365 2024-11-19T01:11:08,366 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T01:11:08,374 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,39873,1731978616939; all regions closed. 2024-11-19T01:11:08,375 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,375 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,375 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,377 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,377 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741834_1010 (size=3306) 2024-11-19T01:11:08,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741834_1010 (size=3306) 2024-11-19T01:11:08,386 DEBUG [RS:0;5134ffc85563:39873 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/oldWALs 2024-11-19T01:11:08,386 INFO [RS:0;5134ffc85563:39873 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C39873%2C1731978616939.meta:.meta(num 1731978617692) 2024-11-19T01:11:08,387 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,387 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,387 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,387 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,387 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741844_1020 (size=1252) 2024-11-19T01:11:08,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741844_1020 (size=1252) 2024-11-19T01:11:08,394 DEBUG [RS:0;5134ffc85563:39873 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/oldWALs 2024-11-19T01:11:08,394 INFO [RS:0;5134ffc85563:39873 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C39873%2C1731978616939:(num 1731978668149) 2024-11-19T01:11:08,394 DEBUG [RS:0;5134ffc85563:39873 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:08,394 INFO [RS:0;5134ffc85563:39873 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:11:08,394 INFO [RS:0;5134ffc85563:39873 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:11:08,394 INFO [RS:0;5134ffc85563:39873 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T01:11:08,394 INFO 
[RS:0;5134ffc85563:39873 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:11:08,395 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T01:11:08,395 INFO [RS:0;5134ffc85563:39873 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39873 2024-11-19T01:11:08,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:11:08,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,39873,1731978616939 2024-11-19T01:11:08,397 INFO [RS:0;5134ffc85563:39873 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:11:08,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,39873,1731978616939] 2024-11-19T01:11:08,400 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,39873,1731978616939 already deleted, retry=false 2024-11-19T01:11:08,400 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,39873,1731978616939 expired; onlineServers=0 2024-11-19T01:11:08,400 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5134ffc85563,39169,1731978616884' ***** 2024-11-19T01:11:08,400 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T01:11:08,400 INFO [M:0;5134ffc85563:39169 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:11:08,400 INFO [M:0;5134ffc85563:39169 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:11:08,400 DEBUG [M:0;5134ffc85563:39169 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T01:11:08,400 DEBUG [M:0;5134ffc85563:39169 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T01:11:08,400 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T01:11:08,400 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978617091 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978617091,5,FailOnTimeoutGroup] 2024-11-19T01:11:08,400 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978617091 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978617091,5,FailOnTimeoutGroup] 2024-11-19T01:11:08,400 INFO [M:0;5134ffc85563:39169 {}] hbase.ChoreService(370): Chore service for: master/5134ffc85563:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T01:11:08,400 INFO [M:0;5134ffc85563:39169 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:11:08,401 DEBUG [M:0;5134ffc85563:39169 {}] master.HMaster(1795): Stopping service threads 2024-11-19T01:11:08,401 INFO [M:0;5134ffc85563:39169 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T01:11:08,401 INFO [M:0;5134ffc85563:39169 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:11:08,401 INFO [M:0;5134ffc85563:39169 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T01:11:08,401 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T01:11:08,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T01:11:08,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:08,406 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-19T01:11:08,407 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-19T01:11:08,408 INFO [M:0;5134ffc85563:39169 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/.lastflushedseqids 2024-11-19T01:11:08,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741849_1025 (size=130) 2024-11-19T01:11:08,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741849_1025 (size=130) 2024-11-19T01:11:08,415 INFO [M:0;5134ffc85563:39169 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T01:11:08,415 INFO [M:0;5134ffc85563:39169 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T01:11:08,416 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:11:08,416 INFO [M:0;5134ffc85563:39169 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:08,416 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:08,416 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:11:08,416 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:08,416 INFO [M:0;5134ffc85563:39169 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-19T01:11:08,432 DEBUG [M:0;5134ffc85563:39169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a368dfd0dc4b49a2aa62c4ed5d475fdc is 82, key is hbase:meta,,1/info:regioninfo/1731978617715/Put/seqid=0 2024-11-19T01:11:08,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:08,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:08,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741850_1026 (size=5672) 2024-11-19T01:11:08,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741850_1026 (size=5672) 2024-11-19T01:11:08,439 INFO [M:0;5134ffc85563:39169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a368dfd0dc4b49a2aa62c4ed5d475fdc 2024-11-19T01:11:08,465 DEBUG [M:0;5134ffc85563:39169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a42b39f712214f7b8b2fc64baade1657 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731978618135/Put/seqid=0 2024-11-19T01:11:08,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741851_1027 (size=7825) 2024-11-19T01:11:08,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741851_1027 (size=7825) 2024-11-19T01:11:08,470 INFO [M:0;5134ffc85563:39169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a42b39f712214f7b8b2fc64baade1657 2024-11-19T01:11:08,475 INFO [M:0;5134ffc85563:39169 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a42b39f712214f7b8b2fc64baade1657 2024-11-19T01:11:08,495 DEBUG [M:0;5134ffc85563:39169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a2fc376c78f4ff989e4e20c91b12cf7 is 69, key is 5134ffc85563,39873,1731978616939/rs:state/1731978617174/Put/seqid=0 2024-11-19T01:11:08,499 INFO [RS:0;5134ffc85563:39873 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:11:08,499 INFO [RS:0;5134ffc85563:39873 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,39873,1731978616939; zookeeper connection closed. 
2024-11-19T01:11:08,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:08,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39873-0x101088b54d00001, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:08,500 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@40d94fbd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@40d94fbd 2024-11-19T01:11:08,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741852_1028 (size=5156) 2024-11-19T01:11:08,500 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T01:11:08,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741852_1028 (size=5156) 2024-11-19T01:11:08,501 INFO [M:0;5134ffc85563:39169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a2fc376c78f4ff989e4e20c91b12cf7 2024-11-19T01:11:08,528 DEBUG [M:0;5134ffc85563:39169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1bf85c39dac446d39d834b54bbe50500 is 52, key is load_balancer_on/state:d/1731978617764/Put/seqid=0 2024-11-19T01:11:08,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741853_1029 (size=5056) 2024-11-19T01:11:08,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741853_1029 (size=5056) 2024-11-19T01:11:08,534 INFO [M:0;5134ffc85563:39169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1bf85c39dac446d39d834b54bbe50500 2024-11-19T01:11:08,540 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a368dfd0dc4b49a2aa62c4ed5d475fdc as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a368dfd0dc4b49a2aa62c4ed5d475fdc 2024-11-19T01:11:08,545 INFO [M:0;5134ffc85563:39169 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a368dfd0dc4b49a2aa62c4ed5d475fdc, entries=8, sequenceid=121, filesize=5.5 K 2024-11-19T01:11:08,546 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a42b39f712214f7b8b2fc64baade1657 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a42b39f712214f7b8b2fc64baade1657 2024-11-19T01:11:08,551 INFO [M:0;5134ffc85563:39169 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a42b39f712214f7b8b2fc64baade1657 2024-11-19T01:11:08,552 INFO [M:0;5134ffc85563:39169 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a42b39f712214f7b8b2fc64baade1657, entries=14, sequenceid=121, filesize=7.6 K 2024-11-19T01:11:08,552 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a2fc376c78f4ff989e4e20c91b12cf7 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7a2fc376c78f4ff989e4e20c91b12cf7 2024-11-19T01:11:08,557 INFO [M:0;5134ffc85563:39169 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7a2fc376c78f4ff989e4e20c91b12cf7, entries=1, sequenceid=121, filesize=5.0 K 2024-11-19T01:11:08,559 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1bf85c39dac446d39d834b54bbe50500 as hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1bf85c39dac446d39d834b54bbe50500 2024-11-19T01:11:08,567 INFO [M:0;5134ffc85563:39169 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43883/user/jenkins/test-data/7d7f6b5e-45a3-bb5d-cd5b-2f93e90ce533/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1bf85c39dac446d39d834b54bbe50500, entries=1, sequenceid=121, filesize=4.9 K 2024-11-19T01:11:08,569 INFO [M:0;5134ffc85563:39169 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=121, compaction requested=false 2024-11-19T01:11:08,579 INFO [M:0;5134ffc85563:39169 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:11:08,579 DEBUG [M:0;5134ffc85563:39169 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978668415Disabling compacts and flushes for region at 1731978668415Disabling writes for close at 1731978668416 (+1 ms)Obtaining lock to block concurrent updates at 1731978668416Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731978668416Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44659, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1731978668416Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731978668417 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731978668417Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731978668432 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731978668432Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731978668444 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731978668464 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731978668464Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731978668476 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731978668494 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731978668494Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731978668507 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731978668527 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731978668527Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e2b1bf6: reopening flushed file at 1731978668539 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34da98b8: reopening flushed file at 1731978668545 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d4f0537: reopening flushed file at 1731978668552 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bc6f729: reopening flushed file at 1731978668557 (+5 ms)Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=121, compaction requested=false at 1731978668569 (+12 ms)Writing region close event to WAL at 1731978668579 (+10 ms)Closed at 1731978668579 2024-11-19T01:11:08,580 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,580 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,580 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,581 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:08,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37199 is added to blk_1073741830_1006 (size=53056) 2024-11-19T01:11:08,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38843 is added to blk_1073741830_1006 (size=53056) 2024-11-19T01:11:08,591 INFO [M:0;5134ffc85563:39169 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T01:11:08,591 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T01:11:08,591 INFO [M:0;5134ffc85563:39169 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39169 2024-11-19T01:11:08,591 INFO [M:0;5134ffc85563:39169 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:11:08,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:08,694 INFO [M:0;5134ffc85563:39169 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:11:08,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39169-0x101088b54d00000, quorum=127.0.0.1:63408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:08,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@191911fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:08,699 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30d9f702{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:11:08,699 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:11:08,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dfe8781{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:11:08,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6df2cf02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir/,STOPPED} 2024-11-19T01:11:08,702 WARN [BP-2072489795-172.17.0.2-1731978616239 heartbeating to localhost/127.0.0.1:43883 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:11:08,702 WARN [BP-2072489795-172.17.0.2-1731978616239 heartbeating to localhost/127.0.0.1:43883 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2072489795-172.17.0.2-1731978616239 (Datanode Uuid 8796a9fc-384f-42c7-affc-ce54cd62ab8a) service to localhost/127.0.0.1:43883 2024-11-19T01:11:08,702 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:11:08,703 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:11:08,703 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data3/current/BP-2072489795-172.17.0.2-1731978616239 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:08,704 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:11:08,704 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data4/current/BP-2072489795-172.17.0.2-1731978616239 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:08,705 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:11:08,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1af676f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:08,707 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9cd716d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:11:08,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:11:08,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5718d675{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:11:08,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25c02940{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir/,STOPPED} 2024-11-19T01:11:08,712 WARN [BP-2072489795-172.17.0.2-1731978616239 heartbeating to localhost/127.0.0.1:43883 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:11:08,712 WARN [BP-2072489795-172.17.0.2-1731978616239 heartbeating to localhost/127.0.0.1:43883 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2072489795-172.17.0.2-1731978616239 (Datanode Uuid b35ddbac-98e3-4b74-bb2c-55961cdc4137) service to localhost/127.0.0.1:43883 2024-11-19T01:11:08,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data1/current/BP-2072489795-172.17.0.2-1731978616239 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:08,713 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/cluster_b4e9978a-40c5-47e6-5944-88c5de2f23d9/data/data2/current/BP-2072489795-172.17.0.2-1731978616239 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:08,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:11:08,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47df503f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:11:08,723 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f3519da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:11:08,723 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:11:08,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a31a089{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:11:08,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15cd018{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir/,STOPPED} 2024-11-19T01:11:08,732 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T01:11:08,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T01:11:08,765 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5134ffc85563:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43883 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43883 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:43883 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43883 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43883 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43883 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43883 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43883 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=486 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=316 (was 278) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3705 (was 3862) 2024-11-19T01:11:08,773 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=316, ProcessCount=11, AvailableMemoryMB=3705 2024-11-19T01:11:08,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.log.dir so I do NOT create it in target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/07d92ec8-396c-410e-4fbf-03e549721237/hadoop.tmp.dir so I do NOT create it in target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8, deleteOnExit=true 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/test.cache.data in system properties and HBase conf 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir in system properties and HBase conf 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T01:11:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T01:11:08,774 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/nfs.dump.dir in system properties and HBase conf 2024-11-19T01:11:08,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/java.io.tmpdir in system properties and HBase conf 2024-11-19T01:11:08,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:11:08,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T01:11:08,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T01:11:08,792 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:11:08,857 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:08,862 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:11:08,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:11:08,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:11:08,863 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:11:08,864 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:08,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47a053ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:11:08,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ffc76d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:11:09,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f05aefd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/java.io.tmpdir/jetty-localhost-35017-hadoop-hdfs-3_4_1-tests_jar-_-any-4395434605521893839/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:11:09,009 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@282647c0{HTTP/1.1, (http/1.1)}{localhost:35017} 2024-11-19T01:11:09,009 INFO [Time-limited test {}] server.Server(415): Started @240305ms 2024-11-19T01:11:09,027 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:11:09,131 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:09,137 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:11:09,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:11:09,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:11:09,146 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:11:09,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34e466bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:11:09,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aa952ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:11:09,190 INFO [regionserver/5134ffc85563:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:11:09,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3def21d3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/java.io.tmpdir/jetty-localhost-40395-hadoop-hdfs-3_4_1-tests_jar-_-any-11589878862984156861/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:09,308 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41c3a275{HTTP/1.1, (http/1.1)}{localhost:40395} 2024-11-19T01:11:09,308 INFO [Time-limited test {}] server.Server(415): Started @240604ms 2024-11-19T01:11:09,309 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:11:09,380 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:09,383 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:11:09,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:11:09,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:11:09,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:11:09,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cfa2328{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:11:09,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fe51576{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:11:09,422 WARN [Thread-1950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data1/current/BP-1228638944-172.17.0.2-1731978668799/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:09,422 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data2/current/BP-1228638944-172.17.0.2-1731978668799/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:09,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:09,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:09,451 WARN [Thread-1929 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:11:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x49564bb78fe5e25 with lease ID 0x17b1af5e4af9a9ec: Processing first storage report for DS-55647ff6-bfc5-44f7-ae39-25ea624b6a75 from datanode DatanodeRegistration(127.0.0.1:36855, datanodeUuid=57e639d9-7703-41ef-a4b2-47a5286afc67, infoPort=44843, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799) 2024-11-19T01:11:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x49564bb78fe5e25 with lease ID 0x17b1af5e4af9a9ec: from storage DS-55647ff6-bfc5-44f7-ae39-25ea624b6a75 node DatanodeRegistration(127.0.0.1:36855, datanodeUuid=57e639d9-7703-41ef-a4b2-47a5286afc67, infoPort=44843, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x49564bb78fe5e25 with lease ID 0x17b1af5e4af9a9ec: Processing first storage report for DS-34ad7084-a70e-4040-9959-30f78a2ab850 from datanode DatanodeRegistration(127.0.0.1:36855, datanodeUuid=57e639d9-7703-41ef-a4b2-47a5286afc67, infoPort=44843, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799) 2024-11-19T01:11:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x49564bb78fe5e25 with lease ID 0x17b1af5e4af9a9ec: from storage DS-34ad7084-a70e-4040-9959-30f78a2ab850 node DatanodeRegistration(127.0.0.1:36855, datanodeUuid=57e639d9-7703-41ef-a4b2-47a5286afc67, infoPort=44843, infoSecurePort=0, ipcPort=35775, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:09,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d5c4bd3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/java.io.tmpdir/jetty-localhost-39899-hadoop-hdfs-3_4_1-tests_jar-_-any-3411846005865887246/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:09,549 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@811037{HTTP/1.1, (http/1.1)}{localhost:39899} 2024-11-19T01:11:09,549 INFO [Time-limited test {}] server.Server(415): Started @240845ms 2024-11-19T01:11:09,551 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T01:11:09,657 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data3/current/BP-1228638944-172.17.0.2-1731978668799/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:09,658 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data4/current/BP-1228638944-172.17.0.2-1731978668799/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:09,690 WARN [Thread-1965 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:11:09,695 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe1edcd5d2a26aeb9 with lease ID 0x17b1af5e4af9a9ed: Processing first storage report for DS-8e08ec69-47ff-405c-918e-f3dc1d4b0462 from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=085e1805-c6d5-46b7-980a-7386f7210858, infoPort=40651, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799) 2024-11-19T01:11:09,696 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe1edcd5d2a26aeb9 with lease ID 0x17b1af5e4af9a9ed: from storage DS-8e08ec69-47ff-405c-918e-f3dc1d4b0462 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=085e1805-c6d5-46b7-980a-7386f7210858, infoPort=40651, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:09,696 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe1edcd5d2a26aeb9 with lease ID 0x17b1af5e4af9a9ed: Processing first storage report for DS-faf7091b-39d6-4b64-a099-ab67ab32347c from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=085e1805-c6d5-46b7-980a-7386f7210858, infoPort=40651, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799) 2024-11-19T01:11:09,696 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe1edcd5d2a26aeb9 with lease ID 0x17b1af5e4af9a9ed: from storage DS-faf7091b-39d6-4b64-a099-ab67ab32347c node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=085e1805-c6d5-46b7-980a-7386f7210858, infoPort=40651, infoSecurePort=0, ipcPort=39951, storageInfo=lv=-57;cid=testClusterID;nsid=1032921705;c=1731978668799), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:09,787 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393 2024-11-19T01:11:09,790 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/zookeeper_0, clientPort=54802, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T01:11:09,792 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54802 2024-11-19T01:11:09,793 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:09,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:09,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:11:09,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:11:09,808 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2 with version=8 2024-11-19T01:11:09,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase-staging 2024-11-19T01:11:09,811 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:11:09,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:09,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:09,811 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:11:09,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:09,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:11:09,811 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T01:11:09,811 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:11:09,812 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34829 2024-11-19T01:11:09,813 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34829 connecting to ZooKeeper ensemble=127.0.0.1:54802 2024-11-19T01:11:09,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348290x0, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:11:09,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34829-0x101088c23880000 connected 2024-11-19T01:11:09,849 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:09,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:09,854 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:09,854 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2, hbase.cluster.distributed=false 2024-11-19T01:11:09,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:11:09,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34829 2024-11-19T01:11:09,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34829 2024-11-19T01:11:09,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34829 2024-11-19T01:11:09,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34829 2024-11-19T01:11:09,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34829 2024-11-19T01:11:09,885 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:11:09,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:09,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:09,885 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:11:09,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:09,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:11:09,885 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:11:09,885 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:11:09,886 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33403 2024-11-19T01:11:09,888 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33403 connecting to ZooKeeper ensemble=127.0.0.1:54802 2024-11-19T01:11:09,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:09,890 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:09,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334030x0, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:11:09,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:334030x0, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:09,895 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:11:09,897 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33403-0x101088c23880001 connected 2024-11-19T01:11:09,903 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:11:09,903 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T01:11:09,905 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:11:09,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33403 2024-11-19T01:11:09,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33403 2024-11-19T01:11:09,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33403 2024-11-19T01:11:09,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33403 2024-11-19T01:11:09,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33403 2024-11-19T01:11:09,934 
DEBUG [M:0;5134ffc85563:34829 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5134ffc85563:34829 2024-11-19T01:11:09,935 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5134ffc85563,34829,1731978669810 2024-11-19T01:11:09,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:11:09,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:11:09,938 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5134ffc85563,34829,1731978669810 2024-11-19T01:11:09,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:09,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T01:11:09,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:09,941 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:11:09,941 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5134ffc85563,34829,1731978669810 from backup master directory 2024-11-19T01:11:09,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:11:09,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5134ffc85563,34829,1731978669810 2024-11-19T01:11:09,944 WARN [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T01:11:09,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:11:09,944 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5134ffc85563,34829,1731978669810 2024-11-19T01:11:09,948 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/hbase.id] with ID: f4ac183a-8d28-4740-813d-deba3b767b53 2024-11-19T01:11:09,948 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/.tmp/hbase.id 2024-11-19T01:11:09,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:11:09,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:11:09,966 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/.tmp/hbase.id]:[hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/hbase.id] 2024-11-19T01:11:09,983 INFO [master/5134ffc85563:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:09,983 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T01:11:09,984 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-19T01:11:09,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:09,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:09,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:11:09,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:11:10,000 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:11:10,000 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T01:11:10,001 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:11:10,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:11:10,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:11:10,021 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store 2024-11-19T01:11:10,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:11:10,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:11:10,038 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:10,038 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:11:10,038 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:10,038 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:10,038 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:11:10,038 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:10,038 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:11:10,038 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978670038Disabling compacts and flushes for region at 1731978670038Disabling writes for close at 1731978670038Writing region close event to WAL at 1731978670038Closed at 1731978670038 2024-11-19T01:11:10,039 WARN [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/.initializing 2024-11-19T01:11:10,039 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/WALs/5134ffc85563,34829,1731978669810 2024-11-19T01:11:10,043 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C34829%2C1731978669810, suffix=, logDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/WALs/5134ffc85563,34829,1731978669810, archiveDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/oldWALs, maxLogs=10 2024-11-19T01:11:10,043 INFO [master/5134ffc85563:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C34829%2C1731978669810.1731978670043 2024-11-19T01:11:10,063 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/WALs/5134ffc85563,34829,1731978669810/5134ffc85563%2C34829%2C1731978669810.1731978670043 2024-11-19T01:11:10,071 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40651:40651),(127.0.0.1/127.0.0.1:44843:44843)] 2024-11-19T01:11:10,076 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:11:10,077 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:10,077 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,077 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,081 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T01:11:10,083 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:11:10,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T01:11:10,085 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:11:10,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T01:11:10,087 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:11:10,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T01:11:10,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:11:10,089 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,090 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,090 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,092 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,092 DEBUG [master/5134ffc85563:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,094 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T01:11:10,095 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:11:10,120 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:11:10,123 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842490, jitterRate=0.07128162682056427}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T01:11:10,124 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731978670077Initializing all the Stores at 1731978670078 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978670078Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978670081 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978670081Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978670081Cleaning up temporary data from old regions at 1731978670092 (+11 ms)Region opened successfully at 1731978670124 (+32 ms) 2024-11-19T01:11:10,125 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T01:11:10,131 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@241e7c36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:11:10,131 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T01:11:10,132 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T01:11:10,132 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T01:11:10,132 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T01:11:10,133 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T01:11:10,133 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T01:11:10,133 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T01:11:10,138 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T01:11:10,142 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T01:11:10,144 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T01:11:10,144 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T01:11:10,145 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T01:11:10,147 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T01:11:10,148 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T01:11:10,150 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T01:11:10,152 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T01:11:10,153 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T01:11:10,154 DEBUG 
[master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T01:11:10,158 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T01:11:10,160 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T01:11:10,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:11:10,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:11:10,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,162 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5134ffc85563,34829,1731978669810, sessionid=0x101088c23880000, setting cluster-up flag (Was=false) 2024-11-19T01:11:10,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,171 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T01:11:10,172 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,34829,1731978669810 2024-11-19T01:11:10,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,185 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T01:11:10,186 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,34829,1731978669810 2024-11-19T01:11:10,189 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T01:11:10,192 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T01:11:10,192 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T01:11:10,192 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T01:11:10,193 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5134ffc85563,34829,1731978669810 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T01:11:10,195 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:11:10,195 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:11:10,196 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:11:10,196 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:11:10,196 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5134ffc85563:0, corePoolSize=10, maxPoolSize=10 2024-11-19T01:11:10,196 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,196 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:11:10,196 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T01:11:10,207 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:11:10,208 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T01:11:10,209 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,209 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T01:11:10,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731978700217 2024-11-19T01:11:10,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T01:11:10,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T01:11:10,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T01:11:10,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T01:11:10,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T01:11:10,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T01:11:10,221 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,230 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(746): ClusterId : f4ac183a-8d28-4740-813d-deba3b767b53 2024-11-19T01:11:10,230 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:11:10,233 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:11:10,233 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T01:11:10,235 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:11:10,236 DEBUG [RS:0;5134ffc85563:33403 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c6edcd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:11:10,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T01:11:10,237 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T01:11:10,238 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T01:11:10,239 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T01:11:10,239 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T01:11:10,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:11:10,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:11:10,248 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T01:11:10,248 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2 2024-11-19T01:11:10,253 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978670239,5,FailOnTimeoutGroup] 2024-11-19T01:11:10,256 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5134ffc85563:33403 2024-11-19T01:11:10,257 INFO [RS:0;5134ffc85563:33403 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:11:10,257 INFO [RS:0;5134ffc85563:33403 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:11:10,257 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T01:11:10,257 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978670253,5,FailOnTimeoutGroup] 2024-11-19T01:11:10,257 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,257 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T01:11:10,257 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,257 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-19T01:11:10,258 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,34829,1731978669810 with port=33403, startcode=1731978669885 2024-11-19T01:11:10,258 DEBUG [RS:0;5134ffc85563:33403 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:11:10,268 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33459, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:11:10,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34829 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,33403,1731978669885 2024-11-19T01:11:10,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34829 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,33403,1731978669885 2024-11-19T01:11:10,273 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2 2024-11-19T01:11:10,273 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41945 2024-11-19T01:11:10,273 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:11:10,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:11:10,276 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,33403,1731978669885] 2024-11-19T01:11:10,278 DEBUG [RS:0;5134ffc85563:33403 {}] zookeeper.ZKUtil(111): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,33403,1731978669885 2024-11-19T01:11:10,278 WARN [RS:0;5134ffc85563:33403 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T01:11:10,278 INFO [RS:0;5134ffc85563:33403 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:11:10,278 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885 2024-11-19T01:11:10,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:11:10,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:11:10,287 INFO [RS:0;5134ffc85563:33403 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:11:10,289 INFO [RS:0;5134ffc85563:33403 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:11:10,290 INFO [RS:0;5134ffc85563:33403 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:11:10,290 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,291 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:11:10,292 INFO [RS:0;5134ffc85563:33403 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:11:10,292 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
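[Note] The MemStoreFlusher figures above (globalMemStoreLimit=880 M, lowMark=836 M) are consistent with the usual HBase derivation: a fraction of the JVM heap (hbase.regionserver.global.memstore.size, default 0.4) for the upper limit, and 95% of that (hbase.regionserver.global.memstore.size.lower.limit, default 0.95) for the low-water mark. A minimal arithmetic sketch follows; the ~2200 MB heap value is an assumption inferred from these numbers, not something stated in this log.

// Sketch of how the MemStoreFlusher limits above could arise from defaults.
// ASSUMPTION: a ~2200 MB max heap for the test JVM; only the 0.4 and 0.95
// factors are HBase defaults, the heap value itself is inferred.
public class MemStoreLimitSketch {
    public static void main(String[] args) {
        long heapBytes = 2200L * 1024 * 1024;   // assumed -Xmx for the test JVM
        double globalFraction = 0.4;            // hbase.regionserver.global.memstore.size default
        double lowerFraction = 0.95;            // ...global.memstore.size.lower.limit default
        long globalLimit = (long) (heapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerFraction);
        System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n",
                globalLimit / (1024 * 1024), lowMark / (1024 * 1024));
        // With these inputs this prints 880 MB and 836 MB, matching the log line above.
    }
}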
2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:11:10,292 DEBUG [RS:0;5134ffc85563:33403 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:11:10,300 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,300 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,300 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,300 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
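[Note] Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" entry above describes a fixed-size worker pool whose idle core threads are allowed to time out (compare the earlier RemoteProcedureDispatcher line with allowCoreThreadTimeOut=true). The sketch below is only a rough java.util.concurrent equivalent of that shape, not the actual org.apache.hadoop.hbase.executor.ExecutorService implementation.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for a pool with corePoolSize == maxPoolSize.
public class FixedPoolSketch {
    static ThreadPoolExecutor newPool(int size) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                size, size,                    // core == max, as reported in the log
                60L, TimeUnit.SECONDS,         // keep-alive (illustrative value)
                new LinkedBlockingQueue<>());  // unbounded work queue
        pool.allowCoreThreadTimeOut(true);     // let idle core threads exit
        return pool;
    }

    public static void main(String[] args) {
        ThreadPoolExecutor openRegion = newPool(1);    // e.g. RS_OPEN_REGION with size 1
        openRegion.execute(() -> System.out.println("open-region task"));
        openRegion.shutdown();
    }
}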
2024-11-19T01:11:10,300 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,300 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,33403,1731978669885-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:11:10,325 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:11:10,325 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,33403,1731978669885-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,325 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,325 INFO [RS:0;5134ffc85563:33403 {}] regionserver.Replication(171): 5134ffc85563,33403,1731978669885 started 2024-11-19T01:11:10,345 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:10,345 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,33403,1731978669885, RpcServer on 5134ffc85563/172.17.0.2:33403, sessionid=0x101088c23880001 2024-11-19T01:11:10,346 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:11:10,346 DEBUG [RS:0;5134ffc85563:33403 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,33403,1731978669885 2024-11-19T01:11:10,346 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,33403,1731978669885' 2024-11-19T01:11:10,346 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:11:10,347 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:11:10,347 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:11:10,347 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:11:10,347 DEBUG [RS:0;5134ffc85563:33403 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,33403,1731978669885 2024-11-19T01:11:10,347 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,33403,1731978669885' 2024-11-19T01:11:10,347 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:11:10,347 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:11:10,348 DEBUG [RS:0;5134ffc85563:33403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:11:10,348 INFO [RS:0;5134ffc85563:33403 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:11:10,348 INFO [RS:0;5134ffc85563:33403 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-19T01:11:10,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:10,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:10,450 INFO [RS:0;5134ffc85563:33403 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C33403%2C1731978669885, suffix=, logDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885, archiveDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/oldWALs, maxLogs=32 2024-11-19T01:11:10,450 INFO [RS:0;5134ffc85563:33403 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C33403%2C1731978669885.1731978670450 2024-11-19T01:11:10,456 INFO [RS:0;5134ffc85563:33403 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978670450 2024-11-19T01:11:10,464 DEBUG [RS:0;5134ffc85563:33403 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44843:44843),(127.0.0.1/127.0.0.1:40651:40651)] 2024-11-19T01:11:10,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:10,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:11:10,688 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:11:10,688 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:11:10,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:11:10,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:11:10,690 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:11:10,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:11:10,691 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:11:10,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
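[Note] The "throttle point 2684354560" in the CompactionConfiguration entries above matches the documented default for hbase.regionserver.thread.compaction.throttle, which is derived as 2 x hbase.hstore.compaction.max x hbase.hregion.memstore.flush.size. A worked check, under that assumption:

// Worked check of the compaction throttle point reported in the log above.
public class CompactionThrottleSketch {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024;  // 134217728 bytes, the default flush size
        int maxFilesToCompact = 10;                   // hbase.hstore.compaction.max default
        long throttlePoint = 2L * maxFilesToCompact * memstoreFlushSize;
        System.out.println(throttlePoint);            // prints 2684354560, as in the log
    }
}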
2024-11-19T01:11:10,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:11:10,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:11:10,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:10,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:11:10,694 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:11:10,694 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740 2024-11-19T01:11:10,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740 2024-11-19T01:11:10,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:11:10,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:11:10,697 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
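[Note] The FlushLargeStoresPolicy message just above records the fallback it describes: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the lower bound becomes the region's memstore flush size divided by its number of column families. The arithmetic below reproduces both occurrences in this log; the 64 MB flush size for hbase:meta is inferred from the 16.0 M figure rather than stated explicitly.

// Worked check of the per-family flush lower bound fallback described above.
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        // hbase:meta has 4 families (info, ns, rep_barrier, table); an effective
        // flush size of 64 MB gives the 16.0 M / flushSizeLowerBound=16777216 values.
        long metaFlushSize = 64L * 1024 * 1024;
        System.out.println(metaFlushSize / 4);        // 16777216

        // The master local store (info, proc, rs, state) logged flushSize=134217728,
        // which yields the earlier 32.0 M / flushSizeLowerBound=33554432 values.
        long masterStoreFlushSize = 134217728L;
        System.out.println(masterStoreFlushSize / 4); // 33554432
    }
}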
2024-11-19T01:11:10,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:11:10,702 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:11:10,703 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835003, jitterRate=0.06176219880580902}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:11:10,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731978670685Initializing all the Stores at 1731978670686 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978670686Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978670686Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978670686Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978670686Cleaning up temporary data from old regions at 1731978670697 (+11 ms)Region opened successfully at 1731978670703 (+6 ms) 2024-11-19T01:11:10,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:11:10,703 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:11:10,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:11:10,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:11:10,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:11:10,705 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:11:10,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978670703Disabling compacts and flushes for region at 1731978670703Disabling writes for close at 1731978670703Writing region close 
event to WAL at 1731978670705 (+2 ms)Closed at 1731978670705 2024-11-19T01:11:10,708 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:11:10,708 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T01:11:10,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T01:11:10,710 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:11:10,711 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T01:11:10,861 DEBUG [5134ffc85563:34829 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T01:11:10,862 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:10,864 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,33403,1731978669885, state=OPENING 2024-11-19T01:11:10,865 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T01:11:10,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:10,868 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:11:10,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,33403,1731978669885}] 2024-11-19T01:11:10,868 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:11:10,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:11:11,021 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T01:11:11,024 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39289, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T01:11:11,028 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T01:11:11,028 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:11:11,030 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C33403%2C1731978669885.meta, suffix=.meta, logDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885, archiveDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/oldWALs, maxLogs=32 2024-11-19T01:11:11,030 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C33403%2C1731978669885.meta.1731978671030.meta 2024-11-19T01:11:11,037 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.meta.1731978671030.meta 2024-11-19T01:11:11,043 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40651:40651),(127.0.0.1/127.0.0.1:44843:44843)] 2024-11-19T01:11:11,044 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:11:11,044 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T01:11:11,045 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T01:11:11,045 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
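[Note] The MultiRowMutationEndpoint coprocessor above is loaded with priority 536870911, which is consistent with HBase's system coprocessor priority of Integer.MAX_VALUE / 4 (lower numbers run first, so system coprocessors precede user coprocessors at Integer.MAX_VALUE / 2). A one-line check, assuming those constants:

// Check of the coprocessor priority value seen in the log above.
public class CoprocessorPrioritySketch {
    public static void main(String[] args) {
        int prioritySystem = Integer.MAX_VALUE / 4;   // 536870911, matches the log
        int priorityUser = Integer.MAX_VALUE / 2;     // 1073741823
        System.out.println(prioritySystem + " " + priorityUser);
    }
}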
2024-11-19T01:11:11,045 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T01:11:11,045 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:11,045 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T01:11:11,045 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T01:11:11,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:11:11,047 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:11:11,047 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:11,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:11:11,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:11:11,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:11:11,049 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:11,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:11:11,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:11:11,050 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:11:11,050 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:11,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:11:11,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:11:11,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:11:11,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:11,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T01:11:11,052 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:11:11,053 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740 2024-11-19T01:11:11,054 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740 2024-11-19T01:11:11,056 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:11:11,056 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:11:11,057 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:11:11,058 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:11:11,059 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774158, jitterRate=-0.015607565641403198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:11:11,059 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T01:11:11,059 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731978671045Writing region info on filesystem at 1731978671045Initializing all the Stores at 1731978671046 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978671046Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978671046Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978671046Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978671046Cleaning up temporary data from old regions at 1731978671056 (+10 ms)Running coprocessor post-open hooks at 1731978671059 (+3 ms)Region opened successfully at 1731978671059 2024-11-19T01:11:11,060 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731978671021 2024-11-19T01:11:11,063 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T01:11:11,063 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T01:11:11,064 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:11,065 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,33403,1731978669885, state=OPEN 2024-11-19T01:11:11,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:11:11,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:11:11,069 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:11:11,069 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5134ffc85563,33403,1731978669885 2024-11-19T01:11:11,069 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:11:11,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T01:11:11,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,33403,1731978669885 in 201 msec 2024-11-19T01:11:11,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T01:11:11,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 364 msec 2024-11-19T01:11:11,076 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:11:11,076 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T01:11:11,078 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:11:11,078 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,33403,1731978669885, seqNum=-1] 2024-11-19T01:11:11,078 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:11:11,079 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44819, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:11:11,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 895 msec 2024-11-19T01:11:11,087 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731978671086, completionTime=-1 2024-11-19T01:11:11,087 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T01:11:11,087 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T01:11:11,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T01:11:11,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731978731089 2024-11-19T01:11:11,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731978791089 2024-11-19T01:11:11,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T01:11:11,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34829,1731978669810-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:11,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34829,1731978669810-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:11,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34829,1731978669810-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:11,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5134ffc85563:34829, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:11:11,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:11,090 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T01:11:11,092 DEBUG [master/5134ffc85563:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T01:11:11,096 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.152sec 2024-11-19T01:11:11,096 INFO [master/5134ffc85563:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T01:11:11,096 INFO [master/5134ffc85563:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T01:11:11,096 INFO [master/5134ffc85563:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T01:11:11,096 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T01:11:11,096 INFO [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T01:11:11,096 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34829,1731978669810-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:11:11,097 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34829,1731978669810-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T01:11:11,099 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T01:11:11,099 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T01:11:11,099 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,34829,1731978669810-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:11:11,131 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77ee7ab7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:11:11,131 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5134ffc85563,34829,-1 for getting cluster id 2024-11-19T01:11:11,131 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T01:11:11,132 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f4ac183a-8d28-4740-813d-deba3b767b53' 2024-11-19T01:11:11,133 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T01:11:11,133 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f4ac183a-8d28-4740-813d-deba3b767b53" 2024-11-19T01:11:11,133 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75642934, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:11:11,133 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5134ffc85563,34829,-1] 2024-11-19T01:11:11,134 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T01:11:11,134 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:11,135 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T01:11:11,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@762da951, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:11:11,136 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:11:11,138 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,33403,1731978669885, seqNum=-1] 2024-11-19T01:11:11,138 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:11:11,139 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:11:11,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5134ffc85563,34829,1731978669810 2024-11-19T01:11:11,141 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:11,144 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T01:11:11,144 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T01:11:11,145 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 5134ffc85563,34829,1731978669810 2024-11-19T01:11:11,145 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f899f81 2024-11-19T01:11:11,146 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T01:11:11,147 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44760, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T01:11:11,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34829 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T01:11:11,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34829 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-19T01:11:11,148 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34829 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:11:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34829 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-19T01:11:11,151 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T01:11:11,151 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:11,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34829 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-19T01:11:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T01:11:11,152 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T01:11:11,160 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741835_1011 (size=381) 2024-11-19T01:11:11,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741835_1011 (size=381) 2024-11-19T01:11:11,166 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bcf5b20058f01b9e033c7ae9cd39b33f, NAME => 'TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2 2024-11-19T01:11:11,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741836_1012 (size=64) 2024-11-19T01:11:11,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741836_1012 (size=64) 2024-11-19T01:11:11,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:11,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bcf5b20058f01b9e033c7ae9cd39b33f, disabling compactions & flushes 2024-11-19T01:11:11,173 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:11,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:11,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. after waiting 0 ms 2024-11-19T01:11:11,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:11,173 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 
2024-11-19T01:11:11,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bcf5b20058f01b9e033c7ae9cd39b33f: Waiting for close lock at 1731978671173Disabling compacts and flushes for region at 1731978671173Disabling writes for close at 1731978671173Writing region close event to WAL at 1731978671173Closed at 1731978671173 2024-11-19T01:11:11,177 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T01:11:11,177 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731978671177"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731978671177"}]},"ts":"1731978671177"} 2024-11-19T01:11:11,180 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-19T01:11:11,181 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T01:11:11,181 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978671181"}]},"ts":"1731978671181"} 2024-11-19T01:11:11,183 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-19T01:11:11,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, ASSIGN}] 2024-11-19T01:11:11,185 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, ASSIGN 2024-11-19T01:11:11,186 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, ASSIGN; state=OFFLINE, location=5134ffc85563,33403,1731978669885; forceNewPlan=false, retain=false 2024-11-19T01:11:11,337 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bcf5b20058f01b9e033c7ae9cd39b33f, regionState=OPENING, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:11,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, ASSIGN because future has completed 2024-11-19T01:11:11,340 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bcf5b20058f01b9e033c7ae9cd39b33f, 
server=5134ffc85563,33403,1731978669885}] 2024-11-19T01:11:11,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:11,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:11,496 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:11,497 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bcf5b20058f01b9e033c7ae9cd39b33f, NAME => 'TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:11:11,497 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,497 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:11,497 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,497 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,499 INFO [StoreOpener-bcf5b20058f01b9e033c7ae9cd39b33f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,500 INFO [StoreOpener-bcf5b20058f01b9e033c7ae9cd39b33f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcf5b20058f01b9e033c7ae9cd39b33f columnFamilyName info 2024-11-19T01:11:11,500 DEBUG [StoreOpener-bcf5b20058f01b9e033c7ae9cd39b33f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:11,501 INFO [StoreOpener-bcf5b20058f01b9e033c7ae9cd39b33f-1 {}] regionserver.HStore(327): Store=bcf5b20058f01b9e033c7ae9cd39b33f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:11:11,501 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,502 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,502 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,502 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,502 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,504 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,506 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:11:11,506 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bcf5b20058f01b9e033c7ae9cd39b33f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691097, jitterRate=-0.12122492492198944}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T01:11:11,506 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:11,507 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bcf5b20058f01b9e033c7ae9cd39b33f: Running coprocessor pre-open hook at 
1731978671497Writing region info on filesystem at 1731978671497Initializing all the Stores at 1731978671498 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978671498Cleaning up temporary data from old regions at 1731978671502 (+4 ms)Running coprocessor post-open hooks at 1731978671506 (+4 ms)Region opened successfully at 1731978671507 (+1 ms) 2024-11-19T01:11:11,508 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., pid=6, masterSystemTime=1731978671492 2024-11-19T01:11:11,511 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:11,511 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:11,511 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bcf5b20058f01b9e033c7ae9cd39b33f, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:11,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bcf5b20058f01b9e033c7ae9cd39b33f, server=5134ffc85563,33403,1731978669885 because future has completed 2024-11-19T01:11:11,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T01:11:11,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bcf5b20058f01b9e033c7ae9cd39b33f, server=5134ffc85563,33403,1731978669885 in 175 msec 2024-11-19T01:11:11,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T01:11:11,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, ASSIGN in 334 msec 2024-11-19T01:11:11,521 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T01:11:11,521 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731978671521"}]},"ts":"1731978671521"} 2024-11-19T01:11:11,523 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-19T01:11:11,524 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, 
hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T01:11:11,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 376 msec 2024-11-19T01:11:12,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:12,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:13,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:13,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:13,815 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:11:13,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:13,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:14,319 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-19T01:11:14,319 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T01:11:14,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T01:11:14,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:14,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:15,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:15,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:16,287 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T01:11:16,288 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-19T01:11:16,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:16,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:17,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:17,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:18,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:18,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:19,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:19,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:19,822 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:11:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:19,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:20,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:20,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T01:11:21,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-19T01:11:21,215 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-19T01:11:21,216 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-19T01:11:21,218 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-19T01:11:21,218 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.
2024-11-19T01:11:21,221 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., hostname=5134ffc85563,33403,1731978669885, seqNum=2]
2024-11-19T01:11:21,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on bcf5b20058f01b9e033c7ae9cd39b33f
2024-11-19T01:11:21,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-19T01:11:21,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/afc62e9f11924915aace9e599313070c is 1080, key is row0001/info:/1731978681222/Put/seqid=0
2024-11-19T01:11:21,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741837_1013 (size=12509)
2024-11-19T01:11:21,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741837_1013 (size=12509)
2024-11-19T01:11:21,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/afc62e9f11924915aace9e599313070c
2024-11-19T01:11:21,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/afc62e9f11924915aace9e599313070c as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/afc62e9f11924915aace9e599313070c
2024-11-19T01:11:21,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/afc62e9f11924915aace9e599313070c, entries=7, sequenceid=11, filesize=12.2 K
2024-11-19T01:11:21,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for bcf5b20058f01b9e033c7ae9cd39b33f in 36ms, sequenceid=11, compaction requested=false
2024-11-19T01:11:21,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcf5b20058f01b9e033c7ae9cd39b33f:
2024-11-19T01:11:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on bcf5b20058f01b9e033c7ae9cd39b33f
2024-11-19T01:11:21,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-19T01:11:21,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/bcc1df2d4f8c411c90fd4530f4c35c00 is 1080, key is row0008/info:/1731978681233/Put/seqid=0
2024-11-19T01:11:21,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741838_1014 (size=29761)
2024-11-19T01:11:21,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741838_1014 (size=29761)
2024-11-19T01:11:21,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/bcc1df2d4f8c411c90fd4530f4c35c00
2024-11-19T01:11:21,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/bcc1df2d4f8c411c90fd4530f4c35c00 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00
2024-11-19T01:11:21,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00, entries=23, sequenceid=37, filesize=29.1 K
2024-11-19T01:11:21,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for bcf5b20058f01b9e033c7ae9cd39b33f in 19ms, sequenceid=37, compaction requested=false
2024-11-19T01:11:21,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcf5b20058f01b9e033c7ae9cd39b33f:
2024-11-19T01:11:21,288 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-11-19T01:11:21,288 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-19T01:11:21,288 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00 because midkey is the same as first or last row 2024-11-19T01:11:21,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:21,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:22,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:22,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:23,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:23,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:11:23,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/4d3a3fa673384c5b8febf973b59ab425 is 1080, key is row0031/info:/1731978681270/Put/seqid=0 2024-11-19T01:11:23,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741839_1015 (size=12509) 2024-11-19T01:11:23,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741839_1015 (size=12509) 2024-11-19T01:11:23,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/4d3a3fa673384c5b8febf973b59ab425 2024-11-19T01:11:23,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/4d3a3fa673384c5b8febf973b59ab425 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/4d3a3fa673384c5b8febf973b59ab425 2024-11-19T01:11:23,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/4d3a3fa673384c5b8febf973b59ab425, entries=7, sequenceid=47, filesize=12.2 K 2024-11-19T01:11:23,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for bcf5b20058f01b9e033c7ae9cd39b33f in 30ms, sequenceid=47, compaction requested=true 2024-11-19T01:11:23,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcf5b20058f01b9e033c7ae9cd39b33f: 2024-11-19T01:11:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-19T01:11:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00 because midkey is the same as first or last row 2024-11-19T01:11:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcf5b20058f01b9e033c7ae9cd39b33f:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-19T01:11:23,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:23,312 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:23,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:23,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-19T01:11:23,313 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:11:23,313 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): bcf5b20058f01b9e033c7ae9cd39b33f/info is initiating minor compaction (all files) 2024-11-19T01:11:23,313 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bcf5b20058f01b9e033c7ae9cd39b33f/info in TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:23,314 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/afc62e9f11924915aace9e599313070c, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/4d3a3fa673384c5b8febf973b59ab425] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp, totalSize=53.5 K 2024-11-19T01:11:23,314 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting afc62e9f11924915aace9e599313070c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731978681222 2024-11-19T01:11:23,315 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting bcc1df2d4f8c411c90fd4530f4c35c00, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731978681233 2024-11-19T01:11:23,315 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4d3a3fa673384c5b8febf973b59ab425, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731978681270 2024-11-19T01:11:23,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/24e0f131328d426a9c37334f4b4ccb3c is 1080, key is row0038/info:/1731978683282/Put/seqid=0 
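The entries above show a memstore flush adding a third store file and the short-compactions thread then selecting all three eligible files (totalSize 54779) for a minor compaction. The sketch below is a simplified, stand-alone illustration of that kind of size-ratio file selection; it is not the ExploringCompactionPolicy implementation, and the ratio value and the middle file size are assumptions chosen only so the total matches the logged figure.

```java
import java.util.ArrayList;
import java.util.List;

// Simplified, illustrative store-file selection for a minor compaction.
// Not the real HBase ExploringCompactionPolicy; the ratio and the middle
// file size below are assumptions for demonstration only.
public class CompactionSelectionSketch {

    // A file may stay in the window only if it is not vastly larger than the
    // rest of the window combined (a "ratio" check).
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    // Choose the contiguous run of store-file sizes (oldest first) that passes
    // the ratio check, covers the most files, and has the smallest total size.
    static List<Long> select(List<Long> fileSizes, int minFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size(); end++) {
                List<Long> window = fileSizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                if (!withinRatio(window, ratio)) {
                    continue;
                }
                if (window.size() > best.size()
                        || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Two 12509-byte files are confirmed by the block reports; the middle
        // size is an assumption picked so the sum equals the logged 54779.
        List<Long> sizes = List.of(12509L, 29761L, 12509L);
        List<Long> chosen = select(sizes, 3, 1.2);
        System.out.println("Selected " + chosen.size() + " files, total="
                + chosen.stream().mapToLong(Long::longValue).sum() + " bytes");
    }
}
```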
2024-11-19T01:11:23,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741840_1016 (size=25453) 2024-11-19T01:11:23,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741840_1016 (size=25453) 2024-11-19T01:11:23,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=69 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/24e0f131328d426a9c37334f4b4ccb3c 2024-11-19T01:11:23,328 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcf5b20058f01b9e033c7ae9cd39b33f#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:23,328 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/b4a215f85c13430ea9432ee80b9f8f7a is 1080, key is row0001/info:/1731978681222/Put/seqid=0 2024-11-19T01:11:23,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/24e0f131328d426a9c37334f4b4ccb3c as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/24e0f131328d426a9c37334f4b4ccb3c 2024-11-19T01:11:23,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741841_1017 (size=44978) 2024-11-19T01:11:23,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741841_1017 (size=44978) 2024-11-19T01:11:23,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/24e0f131328d426a9c37334f4b4ccb3c, entries=19, sequenceid=69, filesize=24.9 K 2024-11-19T01:11:23,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=8.41 KB/8608 for bcf5b20058f01b9e033c7ae9cd39b33f in 22ms, sequenceid=69, compaction requested=false 2024-11-19T01:11:23,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcf5b20058f01b9e033c7ae9cd39b33f: 2024-11-19T01:11:23,335 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-19T01:11:23,335 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:23,335 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00 because midkey is the same as first or last row 2024-11-19T01:11:23,339 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/b4a215f85c13430ea9432ee80b9f8f7a as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a 2024-11-19T01:11:23,344 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bcf5b20058f01b9e033c7ae9cd39b33f/info of bcf5b20058f01b9e033c7ae9cd39b33f into b4a215f85c13430ea9432ee80b9f8f7a(size=43.9 K), total size for store is 68.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:11:23,344 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bcf5b20058f01b9e033c7ae9cd39b33f: 2024-11-19T01:11:23,344 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., storeName=bcf5b20058f01b9e033c7ae9cd39b33f/info, priority=13, startTime=1731978683312; duration=0sec 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.8 K, sizeToCheck=16.0 K 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a because midkey is the same as first or last row 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.8 K, sizeToCheck=16.0 K 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a because midkey is the same as first or last row 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.8 K, sizeToCheck=16.0 K 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:23,345 DEBUG 
[RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a because midkey is the same as first or last row 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:23,345 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcf5b20058f01b9e033c7ae9cd39b33f:info 2024-11-19T01:11:23,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:23,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:24,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:24,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-19T01:11:25,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/5a05909058c04712b8d7b4ec3f3f5bc3 is 1080, key is row0057/info:/1731978683314/Put/seqid=0 2024-11-19T01:11:25,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741842_1018 (size=14663) 2024-11-19T01:11:25,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741842_1018 (size=14663) 2024-11-19T01:11:25,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/5a05909058c04712b8d7b4ec3f3f5bc3 2024-11-19T01:11:25,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/5a05909058c04712b8d7b4ec3f3f5bc3 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5a05909058c04712b8d7b4ec3f3f5bc3 2024-11-19T01:11:25,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5a05909058c04712b8d7b4ec3f3f5bc3, entries=9, sequenceid=82, filesize=14.3 K 2024-11-19T01:11:25,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=14.71 KB/15064 for bcf5b20058f01b9e033c7ae9cd39b33f in 24ms, sequenceid=82, compaction requested=true 2024-11-19T01:11:25,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcf5b20058f01b9e033c7ae9cd39b33f: 2024-11-19T01:11:25,354 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,354 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,354 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a because midkey is the same as first or last row 2024-11-19T01:11:25,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bcf5b20058f01b9e033c7ae9cd39b33f:info, priority=-2147483648, current under compaction 
store size is 1 2024-11-19T01:11:25,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:25,354 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:25,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,355 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:11:25,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T01:11:25,355 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): bcf5b20058f01b9e033c7ae9cd39b33f/info is initiating minor compaction (all files) 2024-11-19T01:11:25,355 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bcf5b20058f01b9e033c7ae9cd39b33f/info in TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:25,356 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/24e0f131328d426a9c37334f4b4ccb3c, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5a05909058c04712b8d7b4ec3f3f5bc3] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp, totalSize=83.1 K 2024-11-19T01:11:25,356 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting b4a215f85c13430ea9432ee80b9f8f7a, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731978681222 2024-11-19T01:11:25,356 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 24e0f131328d426a9c37334f4b4ccb3c, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=69, earliestPutTs=1731978683282 2024-11-19T01:11:25,357 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a05909058c04712b8d7b4ec3f3f5bc3, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731978683314 2024-11-19T01:11:25,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/55980ce7ca124bbfbe23a311e1153a58 is 1080, key is row0066/info:/1731978685332/Put/seqid=0 
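Each flush in this stretch first writes the new HFile under the region's .tmp directory and only afterwards commits it by moving it into the info store directory (the "Committing ... as ..." lines). Below is a minimal JDK-only sketch of that write-to-temp-then-rename pattern; the directory layout and file name are illustrative assumptions, not the HRegionFileSystem implementation.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Illustration of the write-to-.tmp-then-commit pattern visible in the log:
// the flusher writes the new file completely under .tmp/ and only makes it
// visible in the store directory via a move once the write has succeeded.
// Directory names are assumptions; this is not HRegionFileSystem code.
public class TmpThenCommitSketch {

    static Path flush(Path storeDir, String fileName, byte[] data) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, data);                       // write fully under .tmp
        Path committed = storeDir.resolve(fileName);
        // Readers never see a half-written file: the move is the commit point.
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("info-store");
        Path hfile = flush(storeDir, "example-hfile", "cells".getBytes());
        System.out.println("Committed " + hfile);
    }
}
```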
2024-11-19T01:11:25,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741843_1019 (size=21141) 2024-11-19T01:11:25,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741843_1019 (size=21141) 2024-11-19T01:11:25,367 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/55980ce7ca124bbfbe23a311e1153a58 2024-11-19T01:11:25,370 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bcf5b20058f01b9e033c7ae9cd39b33f#info#compaction#62 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:25,370 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/5cc87d5b9bb7495e83b9929de50d7b71 is 1080, key is row0001/info:/1731978681222/Put/seqid=0 2024-11-19T01:11:25,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/55980ce7ca124bbfbe23a311e1153a58 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/55980ce7ca124bbfbe23a311e1153a58 2024-11-19T01:11:25,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741844_1020 (size=75378) 2024-11-19T01:11:25,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741844_1020 (size=75378) 2024-11-19T01:11:25,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/55980ce7ca124bbfbe23a311e1153a58, entries=15, sequenceid=100, filesize=20.6 K 2024-11-19T01:11:25,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for bcf5b20058f01b9e033c7ae9cd39b33f in 29ms, sequenceid=100, compaction requested=false 2024-11-19T01:11:25,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcf5b20058f01b9e033c7ae9cd39b33f: 2024-11-19T01:11:25,384 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.7 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,384 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,384 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a because midkey is the same as first or last row 2024-11-19T01:11:25,385 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/5cc87d5b9bb7495e83b9929de50d7b71 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71 2024-11-19T01:11:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T01:11:25,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/d5f697740c254de392b8d037468283cf is 1080, key is row0081/info:/1731978685356/Put/seqid=0 2024-11-19T01:11:25,391 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bcf5b20058f01b9e033c7ae9cd39b33f/info of bcf5b20058f01b9e033c7ae9cd39b33f into 5cc87d5b9bb7495e83b9929de50d7b71(size=73.6 K), total size for store is 94.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
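After every flush and compaction the log repeats the same two split checks: the total store size is compared against sizeToCheck=16.0 K, and a candidate midkey is rejected if it equals the first or last row of the region. A simplified sketch of those two checks follows; the store-file sizes come from the surrounding block reports, the row keys in main() are placeholders, and none of this is the actual ConstantSizeRegionSplitPolicy or StoreUtils code.

```java
import java.util.List;

// Simplified version of the two checks repeated in the log after each flush
// and compaction: (1) is the total store size past the configured split size,
// and (2) is there a usable midkey (it must differ from the first and last
// row, otherwise one daughter region would be empty). Illustrative only.
public class SplitCheckSketch {

    static boolean shouldSplit(List<Long> storeFileSizes, long sizeToCheck) {
        long sumSize = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        return sumSize > sizeToCheck;
    }

    static boolean canSplitAt(String midkey, String firstRow, String lastRow) {
        // "cannot split ... because midkey is the same as first or last row"
        return !midkey.equals(firstRow) && !midkey.equals(lastRow);
    }

    public static void main(String[] args) {
        long sizeToCheck = 16 * 1024;                   // 16.0 K, as in the log
        List<Long> sizes = List.of(75378L, 21141L);     // ~73.6 K + ~20.6 K store files
        // "row0062" is the splitKey seen later in the log; first/last rows here
        // are made-up placeholders for the illustration.
        System.out.println("should split: " + shouldSplit(sizes, sizeToCheck));
        System.out.println("can split at row0062: "
                + canSplitAt("row0062", "row0001", "row0105"));
    }
}
```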
2024-11-19T01:11:25,391 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bcf5b20058f01b9e033c7ae9cd39b33f: 2024-11-19T01:11:25,391 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., storeName=bcf5b20058f01b9e033c7ae9cd39b33f/info, priority=13, startTime=1731978685354; duration=0sec 2024-11-19T01:11:25,391 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,391 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,392 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,392 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,392 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,392 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,393 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:25,393 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:25,393 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bcf5b20058f01b9e033c7ae9cd39b33f:info 2024-11-19T01:11:25,394 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34829 {}] assignment.AssignmentManager(1363): Split request from 5134ffc85563,33403,1731978669885, parent={ENCODED => bcf5b20058f01b9e033c7ae9cd39b33f, NAME => 'TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-19T01:11:25,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741845_1021 (size=21141) 2024-11-19T01:11:25,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741845_1021 (size=21141) 2024-11-19T01:11:25,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/d5f697740c254de392b8d037468283cf 2024-11-19T01:11:25,400 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34829 {}] assignment.SplitTableRegionProcedure(223): Splittable=true 
state=OPEN, location=5134ffc85563,33403,1731978669885 2024-11-19T01:11:25,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/d5f697740c254de392b8d037468283cf as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/d5f697740c254de392b8d037468283cf 2024-11-19T01:11:25,404 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34829 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcf5b20058f01b9e033c7ae9cd39b33f, daughterA=9435b66a7d149ab73c52092d2797ae91, daughterB=8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:25,405 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcf5b20058f01b9e033c7ae9cd39b33f, daughterA=9435b66a7d149ab73c52092d2797ae91, daughterB=8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:25,405 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcf5b20058f01b9e033c7ae9cd39b33f, daughterA=9435b66a7d149ab73c52092d2797ae91, daughterB=8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:25,405 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcf5b20058f01b9e033c7ae9cd39b33f, daughterA=9435b66a7d149ab73c52092d2797ae91, daughterB=8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:25,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/d5f697740c254de392b8d037468283cf, entries=15, sequenceid=118, filesize=20.6 K 2024-11-19T01:11:25,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for bcf5b20058f01b9e033c7ae9cd39b33f in 25ms, sequenceid=118, compaction requested=true 2024-11-19T01:11:25,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bcf5b20058f01b9e033c7ae9cd39b33f: 2024-11-19T01:11:25,410 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=114.9 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,410 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=114.9 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is 
big enough sumSize=114.9 K, sizeToCheck=16.0 K 2024-11-19T01:11:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T01:11:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:25,412 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34829 {}] assignment.AssignmentManager(1363): Split request from 5134ffc85563,33403,1731978669885, parent={ENCODED => bcf5b20058f01b9e033c7ae9cd39b33f, NAME => 'TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-19T01:11:25,412 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34829 {}] assignment.AssignmentManager(1378): Ignoring split request from 5134ffc85563,33403,1731978669885, parent={ENCODED => bcf5b20058f01b9e033c7ae9cd39b33f, NAME => 'TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-19T01:11:25,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, UNASSIGN}] 2024-11-19T01:11:25,414 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, UNASSIGN 2024-11-19T01:11:25,416 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bcf5b20058f01b9e033c7ae9cd39b33f, regionState=CLOSING, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:25,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, UNASSIGN because future has completed 2024-11-19T01:11:25,418 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-19T01:11:25,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bcf5b20058f01b9e033c7ae9cd39b33f, server=5134ffc85563,33403,1731978669885}] 2024-11-19T01:11:25,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:25,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:25,577 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,577 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-19T01:11:25,577 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing bcf5b20058f01b9e033c7ae9cd39b33f, disabling compactions & flushes 2024-11-19T01:11:25,578 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:25,578 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:25,578 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. after waiting 0 ms 2024-11-19T01:11:25,578 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 
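The Close-WAL-Writer warnings that keep reappearing about once per second come from lease recovery re-probing isFileClosed against a DFSClient that has already been shut down, catching the "Filesystem closed" failure, and retrying. A bare-bones sketch of such a probe-log-retry loop is below; the probe, the one-second interval and the timeout are assumptions for illustration, and this is not RecoverLeaseFSUtils.

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Bare-bones retry loop resembling the behaviour behind the repeated
// "Failed invocation for ..." warnings: probe whether the file is closed,
// log and wait about a second on failure, and give up after a deadline.
// The probe, interval and timeout here are assumptions for illustration.
public class LeaseRecoveryRetrySketch {

    static boolean waitUntilClosed(BooleanSupplier isFileClosed, long timeoutMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            try {
                if (isFileClosed.getAsBoolean()) {
                    return true;                  // lease recovered, file is closed
                }
            } catch (RuntimeException e) {
                // Mirrors the log: the probe itself fails ("Filesystem closed"),
                // but recovery keeps retrying until the timeout expires.
                System.err.println("Failed invocation: " + e.getMessage());
            }
            TimeUnit.SECONDS.sleep(1);
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        // Simulate a client whose filesystem has already been closed.
        BooleanSupplier closedFsProbe = () -> {
            throw new RuntimeException("Filesystem closed");
        };
        System.out.println("recovered: " + waitUntilClosed(closedFsProbe, 3_000));
    }
}
```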
2024-11-19T01:11:25,578 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing bcf5b20058f01b9e033c7ae9cd39b33f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T01:11:25,582 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/95a6866082844b518b10337ccb72c746 is 1080, key is row0096/info:/1731978685386/Put/seqid=0 2024-11-19T01:11:25,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741846_1022 (size=6033) 2024-11-19T01:11:25,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741846_1022 (size=6033) 2024-11-19T01:11:25,587 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/95a6866082844b518b10337ccb72c746 2024-11-19T01:11:25,592 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/.tmp/info/95a6866082844b518b10337ccb72c746 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/95a6866082844b518b10337ccb72c746 2024-11-19T01:11:25,597 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/95a6866082844b518b10337ccb72c746, entries=1, sequenceid=123, filesize=5.9 K 2024-11-19T01:11:25,598 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for bcf5b20058f01b9e033c7ae9cd39b33f in 20ms, sequenceid=123, compaction requested=true 2024-11-19T01:11:25,599 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/afc62e9f11924915aace9e599313070c, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a, 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/4d3a3fa673384c5b8febf973b59ab425, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/24e0f131328d426a9c37334f4b4ccb3c, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5a05909058c04712b8d7b4ec3f3f5bc3] to archive 2024-11-19T01:11:25,599 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T01:11:25,601 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/afc62e9f11924915aace9e599313070c to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/afc62e9f11924915aace9e599313070c 2024-11-19T01:11:25,602 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/bcc1df2d4f8c411c90fd4530f4c35c00 2024-11-19T01:11:25,603 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/b4a215f85c13430ea9432ee80b9f8f7a 2024-11-19T01:11:25,604 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/4d3a3fa673384c5b8febf973b59ab425 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/4d3a3fa673384c5b8febf973b59ab425 2024-11-19T01:11:25,605 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/24e0f131328d426a9c37334f4b4ccb3c to 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/24e0f131328d426a9c37334f4b4ccb3c 2024-11-19T01:11:25,606 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5a05909058c04712b8d7b4ec3f3f5bc3 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5a05909058c04712b8d7b4ec3f3f5bc3 2024-11-19T01:11:25,612 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-19T01:11:25,613 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 2024-11-19T01:11:25,613 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for bcf5b20058f01b9e033c7ae9cd39b33f: Waiting for close lock at 1731978685577Running coprocessor pre-close hooks at 1731978685577Disabling compacts and flushes for region at 1731978685577Disabling writes for close at 1731978685578 (+1 ms)Obtaining lock to block concurrent updates at 1731978685578Preparing flush snapshotting stores in bcf5b20058f01b9e033c7ae9cd39b33f at 1731978685578Finished memstore snapshotting TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731978685578Flushing stores of TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 
at 1731978685579 (+1 ms)Flushing bcf5b20058f01b9e033c7ae9cd39b33f/info: creating writer at 1731978685579Flushing bcf5b20058f01b9e033c7ae9cd39b33f/info: appending metadata at 1731978685581 (+2 ms)Flushing bcf5b20058f01b9e033c7ae9cd39b33f/info: closing flushed file at 1731978685582 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cff5f0e: reopening flushed file at 1731978685591 (+9 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for bcf5b20058f01b9e033c7ae9cd39b33f in 20ms, sequenceid=123, compaction requested=true at 1731978685598 (+7 ms)Writing region close event to WAL at 1731978685609 (+11 ms)Running coprocessor post-close hooks at 1731978685612 (+3 ms)Closed at 1731978685612 2024-11-19T01:11:25,615 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,615 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bcf5b20058f01b9e033c7ae9cd39b33f, regionState=CLOSED 2024-11-19T01:11:25,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bcf5b20058f01b9e033c7ae9cd39b33f, server=5134ffc85563,33403,1731978669885 because future has completed 2024-11-19T01:11:25,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-19T01:11:25,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure bcf5b20058f01b9e033c7ae9cd39b33f, server=5134ffc85563,33403,1731978669885 in 199 msec 2024-11-19T01:11:25,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T01:11:25,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bcf5b20058f01b9e033c7ae9cd39b33f, UNASSIGN in 207 msec 2024-11-19T01:11:25,629 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:25,633 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=bcf5b20058f01b9e033c7ae9cd39b33f, threads=4 2024-11-19T01:11:25,635 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/d5f697740c254de392b8d037468283cf for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,635 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71 for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,635 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/95a6866082844b518b10337ccb72c746 for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,635 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/55980ce7ca124bbfbe23a311e1153a58 for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,646 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/55980ce7ca124bbfbe23a311e1153a58, top=true 2024-11-19T01:11:25,646 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/d5f697740c254de392b8d037468283cf, top=true 2024-11-19T01:11:25,647 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/95a6866082844b518b10337ccb72c746, top=true 2024-11-19T01:11:25,651 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf for child: 8e64315a6f596425fa397bb4426ef86e, parent: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,651 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58 for child: 8e64315a6f596425fa397bb4426ef86e, parent: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,651 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/d5f697740c254de392b8d037468283cf for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,651 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746 for child: 8e64315a6f596425fa397bb4426ef86e, parent: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,651 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/55980ce7ca124bbfbe23a311e1153a58 for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,651 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/95a6866082844b518b10337ccb72c746 for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741847_1023 (size=27) 2024-11-19T01:11:25,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741847_1023 (size=27) 2024-11-19T01:11:25,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741848_1024 (size=27) 2024-11-19T01:11:25,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741848_1024 (size=27) 2024-11-19T01:11:25,675 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71 for region: bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:25,677 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region bcf5b20058f01b9e033c7ae9cd39b33f Daughter A: [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f] storefiles, Daughter B: [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf] storefiles. 
2024-11-19T01:11:25,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741849_1025 (size=71) 2024-11-19T01:11:25,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741849_1025 (size=71) 2024-11-19T01:11:25,687 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:25,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741850_1026 (size=71) 2024-11-19T01:11:25,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741850_1026 (size=71) 2024-11-19T01:11:25,699 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:25,709 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-19T01:11:25,711 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-19T01:11:25,714 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731978685713"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731978685713"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731978685713"}]},"ts":"1731978685713"} 2024-11-19T01:11:25,714 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731978685713"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731978685713"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731978685713"}]},"ts":"1731978685713"} 2024-11-19T01:11:25,714 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731978685713"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731978685713"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731978685713"}]},"ts":"1731978685713"} 2024-11-19T01:11:25,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9435b66a7d149ab73c52092d2797ae91, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=8e64315a6f596425fa397bb4426ef86e, ASSIGN}] 2024-11-19T01:11:25,734 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9435b66a7d149ab73c52092d2797ae91, ASSIGN 2024-11-19T01:11:25,734 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e64315a6f596425fa397bb4426ef86e, ASSIGN 2024-11-19T01:11:25,735 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9435b66a7d149ab73c52092d2797ae91, ASSIGN; state=SPLITTING_NEW, location=5134ffc85563,33403,1731978669885; forceNewPlan=false, retain=false 2024-11-19T01:11:25,735 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e64315a6f596425fa397bb4426ef86e, ASSIGN; state=SPLITTING_NEW, location=5134ffc85563,33403,1731978669885; forceNewPlan=false, retain=false 2024-11-19T01:11:25,885 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9435b66a7d149ab73c52092d2797ae91, regionState=OPENING, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:25,885 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=8e64315a6f596425fa397bb4426ef86e, regionState=OPENING, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:25,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9435b66a7d149ab73c52092d2797ae91, ASSIGN because future has completed 2024-11-19T01:11:25,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9435b66a7d149ab73c52092d2797ae91, server=5134ffc85563,33403,1731978669885}] 2024-11-19T01:11:25,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e64315a6f596425fa397bb4426ef86e, ASSIGN because future has completed 2024-11-19T01:11:25,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e64315a6f596425fa397bb4426ef86e, server=5134ffc85563,33403,1731978669885}] 2024-11-19T01:11:26,044 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 
2024-11-19T01:11:26,044 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 9435b66a7d149ab73c52092d2797ae91, NAME => 'TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-19T01:11:26,045 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,045 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:26,045 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,045 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,046 INFO [StoreOpener-9435b66a7d149ab73c52092d2797ae91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,047 INFO [StoreOpener-9435b66a7d149ab73c52092d2797ae91-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9435b66a7d149ab73c52092d2797ae91 columnFamilyName info 2024-11-19T01:11:26,047 DEBUG [StoreOpener-9435b66a7d149ab73c52092d2797ae91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:26,057 DEBUG [StoreOpener-9435b66a7d149ab73c52092d2797ae91-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f->hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71-bottom 2024-11-19T01:11:26,058 INFO [StoreOpener-9435b66a7d149ab73c52092d2797ae91-1 {}] regionserver.HStore(327): Store=9435b66a7d149ab73c52092d2797ae91/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:11:26,058 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,059 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,060 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,060 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,060 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,062 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,063 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 9435b66a7d149ab73c52092d2797ae91; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876726, jitterRate=0.11481483280658722}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T01:11:26,063 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:26,064 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 9435b66a7d149ab73c52092d2797ae91: Running coprocessor pre-open hook at 1731978686045Writing region info on filesystem at 1731978686045Initializing all the Stores at 1731978686046 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978686046Cleaning up temporary data from old regions at 1731978686060 (+14 ms)Running coprocessor post-open hooks at 1731978686063 (+3 ms)Region opened successfully at 1731978686064 (+1 ms) 2024-11-19T01:11:26,065 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91., pid=12, masterSystemTime=1731978686040 2024-11-19T01:11:26,065 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
9435b66a7d149ab73c52092d2797ae91:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:11:26,065 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-19T01:11:26,065 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:26,066 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:26,066 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): 9435b66a7d149ab73c52092d2797ae91/info is initiating minor compaction (all files) 2024-11-19T01:11:26,066 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9435b66a7d149ab73c52092d2797ae91/info in TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:26,066 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f->hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71-bottom] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/.tmp, totalSize=73.6 K 2024-11-19T01:11:26,067 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731978681222 2024-11-19T01:11:26,067 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:26,067 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:26,067 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 
2024-11-19T01:11:26,068 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 8e64315a6f596425fa397bb4426ef86e, NAME => 'TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-19T01:11:26,068 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,068 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:11:26,068 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9435b66a7d149ab73c52092d2797ae91, regionState=OPEN, openSeqNum=127, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:26,068 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,068 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,070 INFO [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,070 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-19T01:11:26,070 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-19T01:11:26,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-19T01:11:26,070 INFO [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e64315a6f596425fa397bb4426ef86e columnFamilyName info 2024-11-19T01:11:26,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9435b66a7d149ab73c52092d2797ae91, server=5134ffc85563,33403,1731978669885 because future has completed 2024-11-19T01:11:26,071 DEBUG [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:11:26,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-19T01:11:26,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 9435b66a7d149ab73c52092d2797ae91, server=5134ffc85563,33403,1731978669885 in 184 msec 2024-11-19T01:11:26,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9435b66a7d149ab73c52092d2797ae91, ASSIGN in 342 msec 2024-11-19T01:11:26,079 DEBUG [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f->hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71-top 2024-11-19T01:11:26,084 DEBUG [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58 2024-11-19T01:11:26,088 DEBUG [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746 2024-11-19T01:11:26,091 DEBUG [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] 
regionserver.StoreEngine(278): loaded hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf 2024-11-19T01:11:26,091 INFO [StoreOpener-8e64315a6f596425fa397bb4426ef86e-1 {}] regionserver.HStore(327): Store=8e64315a6f596425fa397bb4426ef86e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:11:26,091 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9435b66a7d149ab73c52092d2797ae91#info#compaction#65 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:26,091 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,092 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/.tmp/info/ec609df74986457c9a43a895c72623d6 is 1080, key is row0001/info:/1731978681222/Put/seqid=0 2024-11-19T01:11:26,092 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/info/21dd4134475c4625bae3c6880110b6fa is 193, key is TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e./info:regioninfo/1731978685885/Put/seqid=0 2024-11-19T01:11:26,093 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,094 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,094 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,096 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741851_1027 (size=70862) 2024-11-19T01:11:26,097 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 8e64315a6f596425fa397bb4426ef86e; next 
sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778875, jitterRate=-0.009610027074813843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T01:11:26,097 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:26,097 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 8e64315a6f596425fa397bb4426ef86e: Running coprocessor pre-open hook at 1731978686068Writing region info on filesystem at 1731978686068Initializing all the Stores at 1731978686069 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978686069Cleaning up temporary data from old regions at 1731978686094 (+25 ms)Running coprocessor post-open hooks at 1731978686097 (+3 ms)Region opened successfully at 1731978686097 2024-11-19T01:11:26,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741851_1027 (size=70862) 2024-11-19T01:11:26,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741852_1028 (size=9847) 2024-11-19T01:11:26,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741852_1028 (size=9847) 2024-11-19T01:11:26,098 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., pid=13, masterSystemTime=1731978686040 2024-11-19T01:11:26,098 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 8e64315a6f596425fa397bb4426ef86e:info, priority=-2147483648, current under compaction store size is 2 2024-11-19T01:11:26,098 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:26,098 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-19T01:11:26,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/info/21dd4134475c4625bae3c6880110b6fa 2024-11-19T01:11:26,100 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region 
TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:26,100 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HStore(1541): 8e64315a6f596425fa397bb4426ef86e/info is initiating minor compaction (all files) 2024-11-19T01:11:26,100 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e64315a6f596425fa397bb4426ef86e/info in TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:26,101 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f->hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71-top, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp, totalSize=120.8 K 2024-11-19T01:11:26,101 DEBUG [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:26,101 INFO [RS_OPEN_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 
2024-11-19T01:11:26,101 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.Compactor(225): Compacting 5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731978681222 2024-11-19T01:11:26,102 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731978685332 2024-11-19T01:11:26,102 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=8e64315a6f596425fa397bb4426ef86e, regionState=OPEN, openSeqNum=127, regionLocation=5134ffc85563,33403,1731978669885 2024-11-19T01:11:26,102 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731978685356 2024-11-19T01:11:26,102 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731978685386 2024-11-19T01:11:26,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e64315a6f596425fa397bb4426ef86e, server=5134ffc85563,33403,1731978669885 because future has completed 2024-11-19T01:11:26,104 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/.tmp/info/ec609df74986457c9a43a895c72623d6 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/info/ec609df74986457c9a43a895c72623d6 2024-11-19T01:11:26,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-19T01:11:26,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 8e64315a6f596425fa397bb4426ef86e, server=5134ffc85563,33403,1731978669885 in 216 msec 2024-11-19T01:11:26,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-19T01:11:26,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e64315a6f596425fa397bb4426ef86e, ASSIGN in 375 msec 2024-11-19T01:11:26,112 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 9435b66a7d149ab73c52092d2797ae91/info of 9435b66a7d149ab73c52092d2797ae91 into ec609df74986457c9a43a895c72623d6(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T01:11:26,112 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9435b66a7d149ab73c52092d2797ae91: 2024-11-19T01:11:26,112 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91., storeName=9435b66a7d149ab73c52092d2797ae91/info, priority=15, startTime=1731978686065; duration=0sec 2024-11-19T01:11:26,112 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:26,112 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9435b66a7d149ab73c52092d2797ae91:info 2024-11-19T01:11:26,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bcf5b20058f01b9e033c7ae9cd39b33f, daughterA=9435b66a7d149ab73c52092d2797ae91, daughterB=8e64315a6f596425fa397bb4426ef86e in 711 msec 2024-11-19T01:11:26,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/ns/85fb8f8cc477438684ed40fd0072787e is 43, key is default/ns:d/1731978671080/Put/seqid=0 2024-11-19T01:11:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741853_1029 (size=5153) 2024-11-19T01:11:26,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741853_1029 (size=5153) 2024-11-19T01:11:26,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/ns/85fb8f8cc477438684ed40fd0072787e 2024-11-19T01:11:26,127 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e64315a6f596425fa397bb4426ef86e#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:26,127 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/24ebbd920eaa4e4aa131736b43cb2a65 is 1080, key is row0062/info:/1731978683323/Put/seqid=0 2024-11-19T01:11:26,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741854_1030 (size=43081) 2024-11-19T01:11:26,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741854_1030 (size=43081) 2024-11-19T01:11:26,138 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/24ebbd920eaa4e4aa131736b43cb2a65 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/24ebbd920eaa4e4aa131736b43cb2a65 2024-11-19T01:11:26,144 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 8e64315a6f596425fa397bb4426ef86e/info of 8e64315a6f596425fa397bb4426ef86e into 24ebbd920eaa4e4aa131736b43cb2a65(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:11:26,144 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:26,144 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., storeName=8e64315a6f596425fa397bb4426ef86e/info, priority=12, startTime=1731978686098; duration=0sec 2024-11-19T01:11:26,144 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:26,144 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e64315a6f596425fa397bb4426ef86e:info 2024-11-19T01:11:26,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/table/8dda2c32258e40cabb1cfe3331521635 is 65, key is TestLogRolling-testLogRolling/table:state/1731978671521/Put/seqid=0 2024-11-19T01:11:26,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741855_1031 (size=5340) 2024-11-19T01:11:26,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741855_1031 (size=5340) 2024-11-19T01:11:26,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/table/8dda2c32258e40cabb1cfe3331521635 2024-11-19T01:11:26,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/info/21dd4134475c4625bae3c6880110b6fa as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/info/21dd4134475c4625bae3c6880110b6fa 2024-11-19T01:11:26,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/info/21dd4134475c4625bae3c6880110b6fa, entries=30, sequenceid=17, filesize=9.6 K 2024-11-19T01:11:26,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/ns/85fb8f8cc477438684ed40fd0072787e as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/ns/85fb8f8cc477438684ed40fd0072787e 2024-11-19T01:11:26,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/ns/85fb8f8cc477438684ed40fd0072787e, entries=2, sequenceid=17, filesize=5.0 K 2024-11-19T01:11:26,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/table/8dda2c32258e40cabb1cfe3331521635 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/table/8dda2c32258e40cabb1cfe3331521635 2024-11-19T01:11:26,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/table/8dda2c32258e40cabb1cfe3331521635, entries=2, sequenceid=17, filesize=5.2 K 2024-11-19T01:11:26,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 103ms, sequenceid=17, compaction requested=false 2024-11-19T01:11:26,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T01:11:26,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:26,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:27,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53142 deadline: 1731978697388, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. is not online on 5134ffc85563,33403,1731978669885 2024-11-19T01:11:27,422 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., hostname=5134ffc85563,33403,1731978669885, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., hostname=5134ffc85563,33403,1731978669885, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. is not online on 5134ffc85563,33403,1731978669885 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T01:11:27,423 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., hostname=5134ffc85563,33403,1731978669885, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f. 
is not online on 5134ffc85563,33403,1731978669885 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T01:11:27,423 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731978671147.bcf5b20058f01b9e033c7ae9cd39b33f., hostname=5134ffc85563,33403,1731978669885, seqNum=2 from cache 2024-11-19T01:11:27,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:27,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:28,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:28,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:29,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:29,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:30,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:30,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:30,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:30,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,151 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T01:11:31,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T01:11:31,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:31,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:32,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:32,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:33,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:33,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:34,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:34,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:35,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:35,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:36,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:36,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:37,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:37,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:37,541 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., hostname=5134ffc85563,33403,1731978669885, seqNum=127] 2024-11-19T01:11:37,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:37,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:11:37,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/944b4d9a1d214d419805d2f467e73f60 is 1080, key is row0097/info:/1731978697543/Put/seqid=0 2024-11-19T01:11:37,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741856_1032 (size=12516) 2024-11-19T01:11:37,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741856_1032 (size=12516) 2024-11-19T01:11:37,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/944b4d9a1d214d419805d2f467e73f60 2024-11-19T01:11:37,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/944b4d9a1d214d419805d2f467e73f60 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/944b4d9a1d214d419805d2f467e73f60 2024-11-19T01:11:37,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/944b4d9a1d214d419805d2f467e73f60, entries=7, sequenceid=137, filesize=12.2 K 2024-11-19T01:11:37,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 8e64315a6f596425fa397bb4426ef86e in 43ms, sequenceid=137, compaction requested=false 2024-11-19T01:11:37,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 
2024-11-19T01:11:37,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:37,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-19T01:11:37,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/9bc2cf18ef6843c393214dd67a686153 is 1080, key is row0104/info:/1731978697555/Put/seqid=0 2024-11-19T01:11:37,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741857_1033 (size=28706) 2024-11-19T01:11:37,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741857_1033 (size=28706) 2024-11-19T01:11:37,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/9bc2cf18ef6843c393214dd67a686153 2024-11-19T01:11:37,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/9bc2cf18ef6843c393214dd67a686153 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/9bc2cf18ef6843c393214dd67a686153 2024-11-19T01:11:37,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/9bc2cf18ef6843c393214dd67a686153, entries=22, sequenceid=162, filesize=28.0 K 2024-11-19T01:11:37,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for 8e64315a6f596425fa397bb4426ef86e in 33ms, sequenceid=162, compaction requested=true 2024-11-19T01:11:37,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:37,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e64315a6f596425fa397bb4426ef86e:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:11:37,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:37,633 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:37,634 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84303 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-19T01:11:37,634 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): 8e64315a6f596425fa397bb4426ef86e/info is initiating minor compaction (all files) 2024-11-19T01:11:37,634 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e64315a6f596425fa397bb4426ef86e/info in TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:37,634 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/24ebbd920eaa4e4aa131736b43cb2a65, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/944b4d9a1d214d419805d2f467e73f60, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/9bc2cf18ef6843c393214dd67a686153] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp, totalSize=82.3 K 2024-11-19T01:11:37,634 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 24ebbd920eaa4e4aa131736b43cb2a65, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731978683323 2024-11-19T01:11:37,635 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 944b4d9a1d214d419805d2f467e73f60, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731978697543 2024-11-19T01:11:37,635 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9bc2cf18ef6843c393214dd67a686153, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1731978697555 2024-11-19T01:11:37,647 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e64315a6f596425fa397bb4426ef86e#info#compaction#72 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:37,647 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/1af2567a9d1c4dcaa61b1ee9d936946a is 1080, key is row0062/info:/1731978683323/Put/seqid=0 2024-11-19T01:11:37,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741858_1034 (size=74586) 2024-11-19T01:11:37,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741858_1034 (size=74586) 2024-11-19T01:11:37,663 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/1af2567a9d1c4dcaa61b1ee9d936946a as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/1af2567a9d1c4dcaa61b1ee9d936946a 2024-11-19T01:11:37,670 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e64315a6f596425fa397bb4426ef86e/info of 8e64315a6f596425fa397bb4426ef86e into 1af2567a9d1c4dcaa61b1ee9d936946a(size=72.8 K), total size for store is 72.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:11:37,670 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:37,670 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., storeName=8e64315a6f596425fa397bb4426ef86e/info, priority=13, startTime=1731978697633; duration=0sec 2024-11-19T01:11:37,670 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:37,670 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e64315a6f596425fa397bb4426ef86e:info 2024-11-19T01:11:38,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:38,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:39,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:39,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:39,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:39,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:11:39,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/4ac21276ec0f418a92f5e87009eb04e9 is 1080, key is row0126/info:/1731978697600/Put/seqid=0 2024-11-19T01:11:39,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741859_1035 (size=12516) 2024-11-19T01:11:39,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741859_1035 (size=12516) 2024-11-19T01:11:39,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/4ac21276ec0f418a92f5e87009eb04e9 2024-11-19T01:11:39,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/4ac21276ec0f418a92f5e87009eb04e9 as 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4ac21276ec0f418a92f5e87009eb04e9 2024-11-19T01:11:39,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4ac21276ec0f418a92f5e87009eb04e9, entries=7, sequenceid=173, filesize=12.2 K 2024-11-19T01:11:39,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 8e64315a6f596425fa397bb4426ef86e in 26ms, sequenceid=173, compaction requested=false 2024-11-19T01:11:39,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:39,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T01:11:39,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/4b7aacfcbc6648a0bccbd607f0ba99a1 is 1080, key is row0133/info:/1731978699614/Put/seqid=0 2024-11-19T01:11:39,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741860_1036 (size=19000) 2024-11-19T01:11:39,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741860_1036 (size=19000) 2024-11-19T01:11:39,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/4b7aacfcbc6648a0bccbd607f0ba99a1 2024-11-19T01:11:39,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/4b7aacfcbc6648a0bccbd607f0ba99a1 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4b7aacfcbc6648a0bccbd607f0ba99a1 2024-11-19T01:11:39,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4b7aacfcbc6648a0bccbd607f0ba99a1, entries=13, sequenceid=189, filesize=18.6 K 2024-11-19T01:11:39,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=13.66 KB/13988 for 8e64315a6f596425fa397bb4426ef86e in 69ms, sequenceid=189, compaction requested=true 2024-11-19T01:11:39,710 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:39,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e64315a6f596425fa397bb4426ef86e:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:11:39,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:39,710 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:39,711 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 106102 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:11:39,711 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): 8e64315a6f596425fa397bb4426ef86e/info is initiating minor compaction (all files) 2024-11-19T01:11:39,712 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e64315a6f596425fa397bb4426ef86e/info in TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:39,712 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/1af2567a9d1c4dcaa61b1ee9d936946a, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4ac21276ec0f418a92f5e87009eb04e9, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4b7aacfcbc6648a0bccbd607f0ba99a1] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp, totalSize=103.6 K 2024-11-19T01:11:39,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:39,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T01:11:39,712 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1af2567a9d1c4dcaa61b1ee9d936946a, keycount=64, bloomtype=ROW, size=72.8 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1731978683323 2024-11-19T01:11:39,713 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ac21276ec0f418a92f5e87009eb04e9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731978697600 2024-11-19T01:11:39,713 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4b7aacfcbc6648a0bccbd607f0ba99a1, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731978699614 2024-11-19T01:11:39,727 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/e9ac1d3082cf40feac4a66f5456ca30f is 1080, key is row0146/info:/1731978699642/Put/seqid=0 2024-11-19T01:11:39,753 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e64315a6f596425fa397bb4426ef86e#info#compaction#76 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:39,753 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/eec5b3388f594ada96fb872355ee944d is 1080, key is row0062/info:/1731978683323/Put/seqid=0 2024-11-19T01:11:39,787 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T01:11:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741861_1037 (size=21156) 2024-11-19T01:11:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741861_1037 (size=21156) 2024-11-19T01:11:39,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/e9ac1d3082cf40feac4a66f5456ca30f 2024-11-19T01:11:39,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741862_1038 (size=96252) 2024-11-19T01:11:39,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741862_1038 (size=96252) 2024-11-19T01:11:39,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/e9ac1d3082cf40feac4a66f5456ca30f as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/e9ac1d3082cf40feac4a66f5456ca30f 2024-11-19T01:11:39,820 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/eec5b3388f594ada96fb872355ee944d as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eec5b3388f594ada96fb872355ee944d 2024-11-19T01:11:39,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/e9ac1d3082cf40feac4a66f5456ca30f, entries=15, sequenceid=207, filesize=20.7 K 2024-11-19T01:11:39,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for 8e64315a6f596425fa397bb4426ef86e in 149ms, sequenceid=207, compaction requested=false 2024-11-19T01:11:39,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:39,867 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e64315a6f596425fa397bb4426ef86e/info of 8e64315a6f596425fa397bb4426ef86e into eec5b3388f594ada96fb872355ee944d(size=94.0 K), total size for store is 114.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:11:39,867 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:39,867 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., storeName=8e64315a6f596425fa397bb4426ef86e/info, priority=13, startTime=1731978699710; duration=0sec 2024-11-19T01:11:39,867 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:39,867 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e64315a6f596425fa397bb4426ef86e:info 2024-11-19T01:11:40,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:40,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:41,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:41,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:41,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:41,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:11:41,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/5951d12374c54f6ebb7875a9e9ffaa14 is 1080, key is row0161/info:/1731978701715/Put/seqid=0 2024-11-19T01:11:41,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741863_1039 (size=12516) 2024-11-19T01:11:41,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741863_1039 (size=12516) 2024-11-19T01:11:41,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/5951d12374c54f6ebb7875a9e9ffaa14 2024-11-19T01:11:41,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/5951d12374c54f6ebb7875a9e9ffaa14 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5951d12374c54f6ebb7875a9e9ffaa14 2024-11-19T01:11:41,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5951d12374c54f6ebb7875a9e9ffaa14, entries=7, sequenceid=218, filesize=12.2 K 2024-11-19T01:11:41,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 8e64315a6f596425fa397bb4426ef86e in 33ms, sequenceid=218, compaction requested=true 2024-11-19T01:11:41,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:41,759 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:41,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e64315a6f596425fa397bb4426ef86e:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:11:41,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:41,760 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 129924 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:11:41,760 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HStore(1541): 8e64315a6f596425fa397bb4426ef86e/info is initiating minor compaction (all files) 2024-11-19T01:11:41,760 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e64315a6f596425fa397bb4426ef86e/info in TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:41,760 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eec5b3388f594ada96fb872355ee944d, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/e9ac1d3082cf40feac4a66f5456ca30f, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5951d12374c54f6ebb7875a9e9ffaa14] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp, totalSize=126.9 K 2024-11-19T01:11:41,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:41,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-19T01:11:41,761 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.Compactor(225): Compacting eec5b3388f594ada96fb872355ee944d, keycount=84, bloomtype=ROW, size=94.0 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731978683323 2024-11-19T01:11:41,761 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.Compactor(225): Compacting e9ac1d3082cf40feac4a66f5456ca30f, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1731978699642 2024-11-19T01:11:41,762 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] compactions.Compactor(225): Compacting 5951d12374c54f6ebb7875a9e9ffaa14, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1731978701715 2024-11-19T01:11:41,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/367b07bcfe5441ca9fd0096c5784626d is 1080, key is row0168/info:/1731978701728/Put/seqid=0 2024-11-19T01:11:41,779 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e64315a6f596425fa397bb4426ef86e#info#compaction#79 average throughput is 36.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:41,780 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/d043a599aab542c0bce76536c8c16b8f is 1080, key is row0062/info:/1731978683323/Put/seqid=0 2024-11-19T01:11:41,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741864_1040 (size=22238) 2024-11-19T01:11:41,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741864_1040 (size=22238) 2024-11-19T01:11:41,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/367b07bcfe5441ca9fd0096c5784626d 2024-11-19T01:11:41,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741865_1041 (size=120074) 2024-11-19T01:11:41,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741865_1041 (size=120074) 2024-11-19T01:11:41,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/367b07bcfe5441ca9fd0096c5784626d as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/367b07bcfe5441ca9fd0096c5784626d 2024-11-19T01:11:41,804 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/d043a599aab542c0bce76536c8c16b8f as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d043a599aab542c0bce76536c8c16b8f 2024-11-19T01:11:41,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/367b07bcfe5441ca9fd0096c5784626d, entries=16, sequenceid=237, filesize=21.7 K 2024-11-19T01:11:41,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, 
heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for 8e64315a6f596425fa397bb4426ef86e in 45ms, sequenceid=237, compaction requested=false 2024-11-19T01:11:41,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:41,813 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e64315a6f596425fa397bb4426ef86e/info of 8e64315a6f596425fa397bb4426ef86e into d043a599aab542c0bce76536c8c16b8f(size=117.3 K), total size for store is 139.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:11:41,813 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:41,813 INFO [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., storeName=8e64315a6f596425fa397bb4426ef86e/info, priority=13, startTime=1731978701759; duration=0sec 2024-11-19T01:11:41,813 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:41,813 DEBUG [RS:0;5134ffc85563:33403-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e64315a6f596425fa397bb4426ef86e:info 2024-11-19T01:11:42,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:42,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:43,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:43,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:43,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:43,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-19T01:11:43,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/d39bb34f23e14504b8247bd5b4f3db7d is 1080, key is row0184/info:/1731978701762/Put/seqid=0 2024-11-19T01:11:43,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741866_1042 (size=15750) 2024-11-19T01:11:43,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741866_1042 (size=15750) 2024-11-19T01:11:43,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/d39bb34f23e14504b8247bd5b4f3db7d 2024-11-19T01:11:43,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/d39bb34f23e14504b8247bd5b4f3db7d as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d39bb34f23e14504b8247bd5b4f3db7d 2024-11-19T01:11:43,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d39bb34f23e14504b8247bd5b4f3db7d, entries=10, sequenceid=251, filesize=15.4 K 2024-11-19T01:11:43,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=12.61 KB/12912 for 8e64315a6f596425fa397bb4426ef86e in 28ms, sequenceid=251, compaction requested=true 2024-11-19T01:11:43,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:43,816 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:43,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e64315a6f596425fa397bb4426ef86e:info, 
priority=-2147483648, current under compaction store size is 1 2024-11-19T01:11:43,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:43,817 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158062 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:11:43,817 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): 8e64315a6f596425fa397bb4426ef86e/info is initiating minor compaction (all files) 2024-11-19T01:11:43,817 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e64315a6f596425fa397bb4426ef86e/info in TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:43,817 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d043a599aab542c0bce76536c8c16b8f, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/367b07bcfe5441ca9fd0096c5784626d, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d39bb34f23e14504b8247bd5b4f3db7d] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp, totalSize=154.4 K 2024-11-19T01:11:43,818 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting d043a599aab542c0bce76536c8c16b8f, keycount=106, bloomtype=ROW, size=117.3 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1731978683323 2024-11-19T01:11:43,818 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 367b07bcfe5441ca9fd0096c5784626d, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731978701728 2024-11-19T01:11:43,819 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting d39bb34f23e14504b8247bd5b4f3db7d, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731978701762 2024-11-19T01:11:43,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:43,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-19T01:11:43,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/38026819d9f645e1a3a5ed67abf4aed1 is 1080, key is row0194/info:/1731978703788/Put/seqid=0 2024-11-19T01:11:43,834 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8e64315a6f596425fa397bb4426ef86e#info#compaction#82 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:43,834 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/80d49c132767486ba610cd48e711c879 is 1080, key is row0062/info:/1731978683323/Put/seqid=0 2024-11-19T01:11:43,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741867_1043 (size=20089) 2024-11-19T01:11:43,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741867_1043 (size=20089) 2024-11-19T01:11:43,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/38026819d9f645e1a3a5ed67abf4aed1 2024-11-19T01:11:43,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/38026819d9f645e1a3a5ed67abf4aed1 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/38026819d9f645e1a3a5ed67abf4aed1 2024-11-19T01:11:43,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e64315a6f596425fa397bb4426ef86e, server=5134ffc85563,33403,1731978669885 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-19T01:11:43,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53142 deadline: 1731978713860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e64315a6f596425fa397bb4426ef86e, server=5134ffc85563,33403,1731978669885 2024-11-19T01:11:43,862 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., hostname=5134ffc85563,33403,1731978669885, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., hostname=5134ffc85563,33403,1731978669885, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e64315a6f596425fa397bb4426ef86e, server=5134ffc85563,33403,1731978669885 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T01:11:43,862 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., hostname=5134ffc85563,33403,1731978669885, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e64315a6f596425fa397bb4426ef86e, server=5134ffc85563,33403,1731978669885 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T01:11:43,862 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., hostname=5134ffc85563,33403,1731978669885, seqNum=127 because the exception is null or not the one we care about 2024-11-19T01:11:43,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741868_1044 (size=148409) 2024-11-19T01:11:43,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741868_1044 (size=148409) 2024-11-19T01:11:43,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/38026819d9f645e1a3a5ed67abf4aed1, entries=14, sequenceid=268, filesize=19.6 K 2024-11-19T01:11:43,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for 8e64315a6f596425fa397bb4426ef86e in 49ms, sequenceid=268, compaction requested=false 2024-11-19T01:11:43,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:43,872 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/80d49c132767486ba610cd48e711c879 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/80d49c132767486ba610cd48e711c879 2024-11-19T01:11:43,878 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e64315a6f596425fa397bb4426ef86e/info of 8e64315a6f596425fa397bb4426ef86e into 80d49c132767486ba610cd48e711c879(size=144.9 K), total size for store is 164.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
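[Editor's note] The RegionTooBusyException recorded above is the region server applying memstore back-pressure ("Over memstore limit=32.0 K") while the flush and compaction shown in the surrounding entries catch up; the AsyncRegionLocatorHelper lines show the client treating it as a retryable condition rather than a stale region location (the HBase client normally retries this internally). Purely as an illustrative sketch, and not part of the test that produced this log, the Java snippet below shows the kind of bounded retry-with-backoff an application could use if it handled this exception itself. The table name and column family come from the log; the row key, qualifier, value, and backoff parameters are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackpressureAwarePut {

      // Retry a put a few times with exponential backoff when the region
      // reports memstore back-pressure, giving the server time to flush
      // and compact (the situation visible in the log above).
      static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
        long sleepMs = 100; // hypothetical starting backoff
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e; // give up after the configured number of attempts
            }
            Thread.sleep(sleepMs);
            sleepMs = Math.min(sleepMs * 2, 10_000); // cap the backoff at 10s
          }
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          Put put = new Put(Bytes.toBytes("row0200")); // hypothetical row key
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")); // hypothetical qualifier/value
          putWithBackoff(table, put, 5);
        }
      }
    }

This is only one way to react to the back-pressure signal; in the logged test run no such application-level handling exists, and the client simply retries while MemStoreFlusher.0 and the short-compactions thread drain the memstore.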
2024-11-19T01:11:43,878 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:43,878 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., storeName=8e64315a6f596425fa397bb4426ef86e/info, priority=13, startTime=1731978703816; duration=0sec 2024-11-19T01:11:43,878 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:43,878 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e64315a6f596425fa397bb4426ef86e:info 2024-11-19T01:11:44,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:44,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:45,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:45,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:46,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:46,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:47,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:11:47,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:48,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:48,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T01:11:49,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[identical WARN records and stack traces recur roughly once per second through 2024-11-19T01:11:53,464, alternating between the 5134ffc85563%2C43549%2C1731978535122.1731978535398 WAL and the 5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta WAL]
2024-11-19T01:11:51,184 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-19T01:11:51,184 INFO [master/5134ffc85563:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-19T01:11:53,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-19T01:11:53,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:53,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-19T01:11:53,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/64c2c5231d4645918367ceb98a8f92a2 is 1080, key is row0208/info:/1731978703820/Put/seqid=0 2024-11-19T01:11:53,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741869_1045 (size=22254) 2024-11-19T01:11:53,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741869_1045 (size=22254) 2024-11-19T01:11:53,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/64c2c5231d4645918367ceb98a8f92a2 2024-11-19T01:11:53,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/64c2c5231d4645918367ceb98a8f92a2 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/64c2c5231d4645918367ceb98a8f92a2 2024-11-19T01:11:53,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/64c2c5231d4645918367ceb98a8f92a2, entries=16, sequenceid=288, filesize=21.7 K 2024-11-19T01:11:53,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=1.05 KB/1076 for 8e64315a6f596425fa397bb4426ef86e in 24ms, sequenceid=288, compaction requested=true 2024-11-19T01:11:53,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:53,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e64315a6f596425fa397bb4426ef86e:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:11:53,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:53,930 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:53,931 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190752 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-19T01:11:53,931 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): 8e64315a6f596425fa397bb4426ef86e/info is initiating minor compaction (all files) 2024-11-19T01:11:53,931 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e64315a6f596425fa397bb4426ef86e/info in TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:53,931 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/80d49c132767486ba610cd48e711c879, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/38026819d9f645e1a3a5ed67abf4aed1, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/64c2c5231d4645918367ceb98a8f92a2] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp, totalSize=186.3 K 2024-11-19T01:11:53,931 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 80d49c132767486ba610cd48e711c879, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731978683323 2024-11-19T01:11:53,932 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 38026819d9f645e1a3a5ed67abf4aed1, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1731978703788 2024-11-19T01:11:53,932 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64c2c5231d4645918367ceb98a8f92a2, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731978703820 2024-11-19T01:11:53,943 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e64315a6f596425fa397bb4426ef86e#info#compaction#84 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:53,944 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/ecb9b25dd1694fa9bde31fc025066a1e is 1080, key is row0062/info:/1731978683323/Put/seqid=0 2024-11-19T01:11:53,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741870_1046 (size=180886) 2024-11-19T01:11:53,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741870_1046 (size=180886) 2024-11-19T01:11:53,953 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/ecb9b25dd1694fa9bde31fc025066a1e as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/ecb9b25dd1694fa9bde31fc025066a1e 2024-11-19T01:11:53,958 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e64315a6f596425fa397bb4426ef86e/info of 8e64315a6f596425fa397bb4426ef86e into ecb9b25dd1694fa9bde31fc025066a1e(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:11:53,958 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:53,958 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., storeName=8e64315a6f596425fa397bb4426ef86e/info, priority=13, startTime=1731978713930; duration=0sec 2024-11-19T01:11:53,958 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:53,958 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e64315a6f596425fa397bb4426ef86e:info 2024-11-19T01:11:54,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[identical WARN records and stack traces recur at 2024-11-19T01:11:54,465 for the 5134ffc85563%2C43549%2C1731978535122.1731978535398 WAL and at 2024-11-19T01:11:55,466 for the 5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta WAL]
2024-11-19T01:11:55,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:55,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T01:11:55,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/c48a0a1de5324fc396f6d470f523dcc6 is 1080, key is row0224/info:/1731978713908/Put/seqid=0 2024-11-19T01:11:55,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741871_1047 (size=12523) 2024-11-19T01:11:55,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741871_1047 (size=12523) 2024-11-19T01:11:55,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/c48a0a1de5324fc396f6d470f523dcc6 2024-11-19T01:11:55,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/c48a0a1de5324fc396f6d470f523dcc6 as 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/c48a0a1de5324fc396f6d470f523dcc6 2024-11-19T01:11:55,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/c48a0a1de5324fc396f6d470f523dcc6, entries=7, sequenceid=299, filesize=12.2 K 2024-11-19T01:11:55,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 8e64315a6f596425fa397bb4426ef86e in 22ms, sequenceid=299, compaction requested=false 2024-11-19T01:11:55,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:55,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33403 {}] regionserver.HRegion(8855): Flush requested on 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:55,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T01:11:55,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/eda3afa50db34090a3ec0b67a03d65ad is 1080, key is row0231/info:/1731978715919/Put/seqid=0 2024-11-19T01:11:55,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741872_1048 (size=21171) 2024-11-19T01:11:55,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741872_1048 (size=21171) 2024-11-19T01:11:55,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/eda3afa50db34090a3ec0b67a03d65ad 2024-11-19T01:11:55,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/eda3afa50db34090a3ec0b67a03d65ad as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eda3afa50db34090a3ec0b67a03d65ad 2024-11-19T01:11:55,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eda3afa50db34090a3ec0b67a03d65ad, entries=15, sequenceid=317, filesize=20.7 K 2024-11-19T01:11:55,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 8e64315a6f596425fa397bb4426ef86e in 21ms, sequenceid=317, compaction requested=true 2024-11-19T01:11:55,961 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:55,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e64315a6f596425fa397bb4426ef86e:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T01:11:55,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:55,962 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T01:11:55,962 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 214580 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T01:11:55,963 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1541): 8e64315a6f596425fa397bb4426ef86e/info is initiating minor compaction (all files) 2024-11-19T01:11:55,963 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e64315a6f596425fa397bb4426ef86e/info in TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:55,963 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/ecb9b25dd1694fa9bde31fc025066a1e, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/c48a0a1de5324fc396f6d470f523dcc6, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eda3afa50db34090a3ec0b67a03d65ad] into tmpdir=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp, totalSize=209.6 K 2024-11-19T01:11:55,963 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting ecb9b25dd1694fa9bde31fc025066a1e, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731978683323 2024-11-19T01:11:55,963 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting c48a0a1de5324fc396f6d470f523dcc6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731978713908 2024-11-19T01:11:55,964 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] compactions.Compactor(225): Compacting eda3afa50db34090a3ec0b67a03d65ad, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1731978715919 2024-11-19T01:11:55,974 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e64315a6f596425fa397bb4426ef86e#info#compaction#87 average throughput is 62.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T01:11:55,975 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/dc4c93a219cc46dcb55de049842f1251 is 1080, key is row0062/info:/1731978683323/Put/seqid=0 2024-11-19T01:11:55,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741873_1049 (size=204803) 2024-11-19T01:11:55,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741873_1049 (size=204803) 2024-11-19T01:11:55,982 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/dc4c93a219cc46dcb55de049842f1251 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/dc4c93a219cc46dcb55de049842f1251 2024-11-19T01:11:55,987 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e64315a6f596425fa397bb4426ef86e/info of 8e64315a6f596425fa397bb4426ef86e into dc4c93a219cc46dcb55de049842f1251(size=200.0 K), total size for store is 200.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T01:11:55,987 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:55,987 INFO [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., storeName=8e64315a6f596425fa397bb4426ef86e/info, priority=13, startTime=1731978715961; duration=0sec 2024-11-19T01:11:55,987 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T01:11:55,987 DEBUG [RS:0;5134ffc85563:33403-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e64315a6f596425fa397bb4426ef86e:info 2024-11-19T01:11:56,045 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-19T01:11:56,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[identical WARN records and stack traces recur at 2024-11-19T01:11:56,466 for the 5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta WAL and at 2024-11-19T01:11:57,467 for the 5134ffc85563%2C43549%2C1731978535122.1731978535398 WAL]
2024-11-19T01:11:57,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:57,957 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-19T01:11:57,957 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C33403%2C1731978669885.1731978717957 2024-11-19T01:11:57,964 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:57,965 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:57,965 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:57,965 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:57,965 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:57,965 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978670450 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978717957 2024-11-19T01:11:57,966 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40651:40651),(127.0.0.1/127.0.0.1:44843:44843)] 2024-11-19T01:11:57,966 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978670450 is not closed yet, will try archiving it next time 2024-11-19T01:11:57,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741833_1009 (size=315283) 2024-11-19T01:11:57,967 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741833_1009 (size=315283) 2024-11-19T01:11:57,971 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-19T01:11:57,978 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/info/ab8cdb81b9e74c18a66749e9156b9e41 is 193, key is TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e./info:regioninfo/1731978686102/Put/seqid=0 2024-11-19T01:11:58,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741875_1051 (size=6223) 2024-11-19T01:11:58,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741875_1051 (size=6223) 2024-11-19T01:11:58,018 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/info/ab8cdb81b9e74c18a66749e9156b9e41 2024-11-19T01:11:58,027 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/.tmp/info/ab8cdb81b9e74c18a66749e9156b9e41 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/info/ab8cdb81b9e74c18a66749e9156b9e41 2024-11-19T01:11:58,035 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/info/ab8cdb81b9e74c18a66749e9156b9e41, entries=5, sequenceid=21, filesize=6.1 K 2024-11-19T01:11:58,036 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 65ms, sequenceid=21, compaction requested=false 2024-11-19T01:11:58,036 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T01:11:58,036 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8e64315a6f596425fa397bb4426ef86e 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T01:11:58,041 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/97f9cbf63a0f4e4b9f6223a1b90e0934 is 1080, key is row0246/info:/1731978715941/Put/seqid=0 2024-11-19T01:11:58,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741876_1052 (size=16839) 2024-11-19T01:11:58,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741876_1052 (size=16839) 2024-11-19T01:11:58,073 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=332 (bloomFilter=true), 
to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/97f9cbf63a0f4e4b9f6223a1b90e0934 2024-11-19T01:11:58,082 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/.tmp/info/97f9cbf63a0f4e4b9f6223a1b90e0934 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/97f9cbf63a0f4e4b9f6223a1b90e0934 2024-11-19T01:11:58,088 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/97f9cbf63a0f4e4b9f6223a1b90e0934, entries=11, sequenceid=332, filesize=16.4 K 2024-11-19T01:11:58,090 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for 8e64315a6f596425fa397bb4426ef86e in 54ms, sequenceid=332, compaction requested=false 2024-11-19T01:11:58,090 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 8e64315a6f596425fa397bb4426ef86e: 2024-11-19T01:11:58,090 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9435b66a7d149ab73c52092d2797ae91: 2024-11-19T01:11:58,090 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C33403%2C1731978669885.1731978718090 2024-11-19T01:11:58,127 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,127 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,127 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,128 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,128 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,128 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978717957 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978718090 2024-11-19T01:11:58,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741874_1050 (size=731) 2024-11-19T01:11:58,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741874_1050 (size=731) 2024-11-19T01:11:58,153 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44843:44843),(127.0.0.1/127.0.0.1:40651:40651)] 2024-11-19T01:11:58,159 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978670450 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/oldWALs/5134ffc85563%2C33403%2C1731978669885.1731978670450 2024-11-19T01:11:58,161 INFO 
[Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T01:11:58,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T01:11:58,161 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T01:11:58,161 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:11:58,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:58,162 
DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:58,162 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T01:11:58,162 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T01:11:58,162 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1184778891, stopped=false 2024-11-19T01:11:58,162 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5134ffc85563,34829,1731978669810 2024-11-19T01:11:58,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:11:58,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:11:58,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:58,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:58,164 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:11:58,165 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T01:11:58,165 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:11:58,165 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:58,165 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,33403,1731978669885' ***** 2024-11-19T01:11:58,165 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:11:58,166 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:58,166 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:58,166 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:11:58,166 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/WALs/5134ffc85563,33403,1731978669885/5134ffc85563%2C33403%2C1731978669885.1731978717957 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/oldWALs/5134ffc85563%2C33403%2C1731978669885.1731978717957 2024-11-19T01:11:58,166 INFO [RS:0;5134ffc85563:33403 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:11:58,166 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:11:58,166 INFO [RS:0;5134ffc85563:33403 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:11:58,166 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(3091): Received CLOSE for 8e64315a6f596425fa397bb4426ef86e 2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(3091): Received CLOSE for 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,33403,1731978669885 2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5134ffc85563:33403. 
2024-11-19T01:11:58,167 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8e64315a6f596425fa397bb4426ef86e, disabling compactions & flushes 2024-11-19T01:11:58,167 DEBUG [RS:0;5134ffc85563:33403 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:11:58,167 DEBUG [RS:0;5134ffc85563:33403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:58,167 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:58,167 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:58,167 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. after waiting 0 ms 2024-11-19T01:11:58,167 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T01:11:58,167 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T01:11:58,169 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-19T01:11:58,169 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8e64315a6f596425fa397bb4426ef86e=TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e., 9435b66a7d149ab73c52092d2797ae91=TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.} 2024-11-19T01:11:58,169 DEBUG [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8e64315a6f596425fa397bb4426ef86e, 9435b66a7d149ab73c52092d2797ae91 2024-11-19T01:11:58,170 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:11:58,170 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:11:58,170 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:11:58,170 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:11:58,170 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:11:58,170 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f->hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71-top, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/24ebbd920eaa4e4aa131736b43cb2a65, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/944b4d9a1d214d419805d2f467e73f60, 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/1af2567a9d1c4dcaa61b1ee9d936946a, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/9bc2cf18ef6843c393214dd67a686153, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4ac21276ec0f418a92f5e87009eb04e9, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eec5b3388f594ada96fb872355ee944d, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4b7aacfcbc6648a0bccbd607f0ba99a1, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/e9ac1d3082cf40feac4a66f5456ca30f, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d043a599aab542c0bce76536c8c16b8f, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5951d12374c54f6ebb7875a9e9ffaa14, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/367b07bcfe5441ca9fd0096c5784626d, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/80d49c132767486ba610cd48e711c879, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d39bb34f23e14504b8247bd5b4f3db7d, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/38026819d9f645e1a3a5ed67abf4aed1, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/ecb9b25dd1694fa9bde31fc025066a1e, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/64c2c5231d4645918367ceb98a8f92a2, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/c48a0a1de5324fc396f6d470f523dcc6, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eda3afa50db34090a3ec0b67a03d65ad] to archive 2024-11-19T01:11:58,171 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T01:11:58,173 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:58,175 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-55980ce7ca124bbfbe23a311e1153a58 2024-11-19T01:11:58,176 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-d5f697740c254de392b8d037468283cf 2024-11-19T01:11:58,177 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/24ebbd920eaa4e4aa131736b43cb2a65 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/24ebbd920eaa4e4aa131736b43cb2a65 2024-11-19T01:11:58,179 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/TestLogRolling-testLogRolling=bcf5b20058f01b9e033c7ae9cd39b33f-95a6866082844b518b10337ccb72c746 2024-11-19T01:11:58,180 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/944b4d9a1d214d419805d2f467e73f60 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/944b4d9a1d214d419805d2f467e73f60 2024-11-19T01:11:58,181 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/1af2567a9d1c4dcaa61b1ee9d936946a to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/1af2567a9d1c4dcaa61b1ee9d936946a 2024-11-19T01:11:58,183 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/9bc2cf18ef6843c393214dd67a686153 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/9bc2cf18ef6843c393214dd67a686153 2024-11-19T01:11:58,184 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4ac21276ec0f418a92f5e87009eb04e9 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4ac21276ec0f418a92f5e87009eb04e9 2024-11-19T01:11:58,186 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eec5b3388f594ada96fb872355ee944d to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eec5b3388f594ada96fb872355ee944d 2024-11-19T01:11:58,187 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4b7aacfcbc6648a0bccbd607f0ba99a1 to 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/4b7aacfcbc6648a0bccbd607f0ba99a1 2024-11-19T01:11:58,188 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/e9ac1d3082cf40feac4a66f5456ca30f to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/e9ac1d3082cf40feac4a66f5456ca30f 2024-11-19T01:11:58,193 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d043a599aab542c0bce76536c8c16b8f to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d043a599aab542c0bce76536c8c16b8f 2024-11-19T01:11:58,195 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5951d12374c54f6ebb7875a9e9ffaa14 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/5951d12374c54f6ebb7875a9e9ffaa14 2024-11-19T01:11:58,198 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/367b07bcfe5441ca9fd0096c5784626d to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/367b07bcfe5441ca9fd0096c5784626d 2024-11-19T01:11:58,199 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/80d49c132767486ba610cd48e711c879 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/80d49c132767486ba610cd48e711c879 2024-11-19T01:11:58,201 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d39bb34f23e14504b8247bd5b4f3db7d to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/d39bb34f23e14504b8247bd5b4f3db7d 2024-11-19T01:11:58,203 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/38026819d9f645e1a3a5ed67abf4aed1 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/38026819d9f645e1a3a5ed67abf4aed1 2024-11-19T01:11:58,205 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/ecb9b25dd1694fa9bde31fc025066a1e to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/ecb9b25dd1694fa9bde31fc025066a1e 2024-11-19T01:11:58,206 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-19T01:11:58,207 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/64c2c5231d4645918367ceb98a8f92a2 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/64c2c5231d4645918367ceb98a8f92a2 2024-11-19T01:11:58,207 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:11:58,207 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:11:58,208 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978718170Running coprocessor pre-close hooks at 1731978718170Disabling compacts and flushes for region at 1731978718170Disabling writes for close at 1731978718170Writing region close event to WAL at 1731978718198 (+28 ms)Running coprocessor post-close hooks at 1731978718207 (+9 ms)Closed at 1731978718207 2024-11-19T01:11:58,208 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed 
hbase:meta,,1.1588230740 2024-11-19T01:11:58,209 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/c48a0a1de5324fc396f6d470f523dcc6 to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/c48a0a1de5324fc396f6d470f523dcc6 2024-11-19T01:11:58,210 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eda3afa50db34090a3ec0b67a03d65ad to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/info/eda3afa50db34090a3ec0b67a03d65ad 2024-11-19T01:11:58,211 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5134ffc85563:34829 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T01:11:58,211 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [24ebbd920eaa4e4aa131736b43cb2a65=43081, 944b4d9a1d214d419805d2f467e73f60=12516, 1af2567a9d1c4dcaa61b1ee9d936946a=74586, 9bc2cf18ef6843c393214dd67a686153=28706, 4ac21276ec0f418a92f5e87009eb04e9=12516, eec5b3388f594ada96fb872355ee944d=96252, 4b7aacfcbc6648a0bccbd607f0ba99a1=19000, e9ac1d3082cf40feac4a66f5456ca30f=21156, d043a599aab542c0bce76536c8c16b8f=120074, 5951d12374c54f6ebb7875a9e9ffaa14=12516, 367b07bcfe5441ca9fd0096c5784626d=22238, 80d49c132767486ba610cd48e711c879=148409, d39bb34f23e14504b8247bd5b4f3db7d=15750, 38026819d9f645e1a3a5ed67abf4aed1=20089, ecb9b25dd1694fa9bde31fc025066a1e=180886, 64c2c5231d4645918367ceb98a8f92a2=22254, c48a0a1de5324fc396f6d470f523dcc6=12523, eda3afa50db34090a3ec0b67a03d65ad=21171] 2024-11-19T01:11:58,215 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/8e64315a6f596425fa397bb4426ef86e/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-19T01:11:58,215 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:58,215 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8e64315a6f596425fa397bb4426ef86e: Waiting for close lock at 1731978718167Running coprocessor pre-close hooks at 1731978718167Disabling compacts and flushes for region at 1731978718167Disabling writes for close at 1731978718167Writing region close event to WAL at 1731978718211 (+44 ms)Running coprocessor post-close hooks at 1731978718215 (+4 ms)Closed at 1731978718215 2024-11-19T01:11:58,215 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731978685400.8e64315a6f596425fa397bb4426ef86e. 2024-11-19T01:11:58,216 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9435b66a7d149ab73c52092d2797ae91, disabling compactions & flushes 2024-11-19T01:11:58,216 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:58,216 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:58,216 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. after waiting 0 ms 2024-11-19T01:11:58,216 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 
2024-11-19T01:11:58,216 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f->hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/bcf5b20058f01b9e033c7ae9cd39b33f/info/5cc87d5b9bb7495e83b9929de50d7b71-bottom] to archive 2024-11-19T01:11:58,217 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T01:11:58,219 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f to hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/archive/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/info/5cc87d5b9bb7495e83b9929de50d7b71.bcf5b20058f01b9e033c7ae9cd39b33f 2024-11-19T01:11:58,219 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-19T01:11:58,226 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/data/default/TestLogRolling-testLogRolling/9435b66a7d149ab73c52092d2797ae91/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-19T01:11:58,226 INFO [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:58,226 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9435b66a7d149ab73c52092d2797ae91: Waiting for close lock at 1731978718216Running coprocessor pre-close hooks at 1731978718216Disabling compacts and flushes for region at 1731978718216Disabling writes for close at 1731978718216Writing region close event to WAL at 1731978718222 (+6 ms)Running coprocessor post-close hooks at 1731978718226 (+4 ms)Closed at 1731978718226 2024-11-19T01:11:58,226 DEBUG [RS_CLOSE_REGION-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731978685400.9435b66a7d149ab73c52092d2797ae91. 2024-11-19T01:11:58,300 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T01:11:58,301 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T01:11:58,303 INFO [regionserver/5134ffc85563:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:11:58,370 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,33403,1731978669885; all regions closed. 
2024-11-19T01:11:58,370 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,371 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,371 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,371 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741834_1010 (size=8107) 2024-11-19T01:11:58,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741834_1010 (size=8107) 2024-11-19T01:11:58,378 DEBUG [RS:0;5134ffc85563:33403 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/oldWALs 2024-11-19T01:11:58,378 INFO [RS:0;5134ffc85563:33403 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C33403%2C1731978669885.meta:.meta(num 1731978671030) 2024-11-19T01:11:58,378 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741877_1053 (size=780) 2024-11-19T01:11:58,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741877_1053 (size=780) 2024-11-19T01:11:58,383 DEBUG [RS:0;5134ffc85563:33403 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/oldWALs 2024-11-19T01:11:58,383 INFO [RS:0;5134ffc85563:33403 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C33403%2C1731978669885:(num 1731978718090) 2024-11-19T01:11:58,383 DEBUG [RS:0;5134ffc85563:33403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:11:58,383 INFO [RS:0;5134ffc85563:33403 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:11:58,384 INFO [RS:0;5134ffc85563:33403 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:11:58,384 INFO [RS:0;5134ffc85563:33403 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T01:11:58,384 INFO [RS:0;5134ffc85563:33403 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:11:58,384 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:11:58,384 INFO [RS:0;5134ffc85563:33403 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33403 2024-11-19T01:11:58,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,33403,1731978669885 2024-11-19T01:11:58,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:11:58,386 INFO [RS:0;5134ffc85563:33403 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:11:58,388 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,33403,1731978669885] 2024-11-19T01:11:58,389 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,33403,1731978669885 already deleted, retry=false 2024-11-19T01:11:58,389 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,33403,1731978669885 expired; onlineServers=0 2024-11-19T01:11:58,389 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5134ffc85563,34829,1731978669810' ***** 2024-11-19T01:11:58,390 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T01:11:58,390 INFO [M:0;5134ffc85563:34829 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:11:58,390 INFO [M:0;5134ffc85563:34829 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:11:58,390 DEBUG [M:0;5134ffc85563:34829 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T01:11:58,390 DEBUG [M:0;5134ffc85563:34829 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T01:11:58,390 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T01:11:58,390 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978670239 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978670239,5,FailOnTimeoutGroup] 2024-11-19T01:11:58,390 INFO [M:0;5134ffc85563:34829 {}] hbase.ChoreService(370): Chore service for: master/5134ffc85563:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T01:11:58,390 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978670253 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978670253,5,FailOnTimeoutGroup] 2024-11-19T01:11:58,390 INFO [M:0;5134ffc85563:34829 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:11:58,390 DEBUG [M:0;5134ffc85563:34829 {}] master.HMaster(1795): Stopping service threads 2024-11-19T01:11:58,390 INFO [M:0;5134ffc85563:34829 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T01:11:58,390 INFO [M:0;5134ffc85563:34829 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:11:58,391 INFO [M:0;5134ffc85563:34829 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T01:11:58,391 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T01:11:58,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T01:11:58,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:11:58,392 DEBUG [M:0;5134ffc85563:34829 {}] zookeeper.ZKUtil(347): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T01:11:58,392 WARN [M:0;5134ffc85563:34829 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T01:11:58,392 INFO [M:0;5134ffc85563:34829 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/.lastflushedseqids 2024-11-19T01:11:58,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741878_1054 (size=228) 2024-11-19T01:11:58,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741878_1054 (size=228) 2024-11-19T01:11:58,399 INFO [M:0;5134ffc85563:34829 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T01:11:58,399 INFO [M:0;5134ffc85563:34829 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T01:11:58,399 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:11:58,399 INFO [M:0;5134ffc85563:34829 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:58,399 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:58,399 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:11:58,399 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:58,399 INFO [M:0;5134ffc85563:34829 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-19T01:11:58,421 DEBUG [M:0;5134ffc85563:34829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a979431be1824fb0a9f789b28c307f8a is 82, key is hbase:meta,,1/info:regioninfo/1731978671064/Put/seqid=0 2024-11-19T01:11:58,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741879_1055 (size=5672) 2024-11-19T01:11:58,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741879_1055 (size=5672) 2024-11-19T01:11:58,426 INFO [M:0;5134ffc85563:34829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a979431be1824fb0a9f789b28c307f8a 2024-11-19T01:11:58,452 DEBUG [M:0;5134ffc85563:34829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/371064964606403ea95f6ce55e7d0643 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731978671525/Put/seqid=0 2024-11-19T01:11:58,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741880_1056 (size=7091) 2024-11-19T01:11:58,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741880_1056 (size=7091) 2024-11-19T01:11:58,459 INFO [M:0;5134ffc85563:34829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/371064964606403ea95f6ce55e7d0643 2024-11-19T01:11:58,466 INFO [M:0;5134ffc85563:34829 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 371064964606403ea95f6ce55e7d0643 2024-11-19T01:11:58,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:58,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:58,484 DEBUG [M:0;5134ffc85563:34829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/629d75400b1048d784b8f6774c5eaabc is 69, key is 5134ffc85563,33403,1731978669885/rs:state/1731978670269/Put/seqid=0 2024-11-19T01:11:58,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:58,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33403-0x101088c23880001, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:58,488 INFO [RS:0;5134ffc85563:33403 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:11:58,488 INFO [RS:0;5134ffc85563:33403 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,33403,1731978669885; zookeeper connection closed. 
2024-11-19T01:11:58,489 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f8338af {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f8338af 2024-11-19T01:11:58,489 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T01:11:58,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741881_1057 (size=5156) 2024-11-19T01:11:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741881_1057 (size=5156) 2024-11-19T01:11:58,491 INFO [M:0;5134ffc85563:34829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/629d75400b1048d784b8f6774c5eaabc 2024-11-19T01:11:58,514 DEBUG [M:0;5134ffc85563:34829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2c2854f9aa614f7c88165195b5df7542 is 52, key is load_balancer_on/state:d/1731978671143/Put/seqid=0 2024-11-19T01:11:58,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741882_1058 (size=5056) 2024-11-19T01:11:58,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741882_1058 (size=5056) 2024-11-19T01:11:58,524 INFO [M:0;5134ffc85563:34829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2c2854f9aa614f7c88165195b5df7542 2024-11-19T01:11:58,530 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a979431be1824fb0a9f789b28c307f8a as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a979431be1824fb0a9f789b28c307f8a 2024-11-19T01:11:58,534 INFO [M:0;5134ffc85563:34829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a979431be1824fb0a9f789b28c307f8a, entries=8, sequenceid=125, filesize=5.5 K 2024-11-19T01:11:58,535 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/371064964606403ea95f6ce55e7d0643 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/371064964606403ea95f6ce55e7d0643 2024-11-19T01:11:58,539 INFO [M:0;5134ffc85563:34829 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom 
(CompoundBloomFilter) metadata for 371064964606403ea95f6ce55e7d0643 2024-11-19T01:11:58,540 INFO [M:0;5134ffc85563:34829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/371064964606403ea95f6ce55e7d0643, entries=13, sequenceid=125, filesize=6.9 K 2024-11-19T01:11:58,541 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/629d75400b1048d784b8f6774c5eaabc as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/629d75400b1048d784b8f6774c5eaabc 2024-11-19T01:11:58,547 INFO [M:0;5134ffc85563:34829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/629d75400b1048d784b8f6774c5eaabc, entries=1, sequenceid=125, filesize=5.0 K 2024-11-19T01:11:58,548 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2c2854f9aa614f7c88165195b5df7542 as hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2c2854f9aa614f7c88165195b5df7542 2024-11-19T01:11:58,553 INFO [M:0;5134ffc85563:34829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41945/user/jenkins/test-data/8933e619-a7fb-280b-7f42-c233587637d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2c2854f9aa614f7c88165195b5df7542, entries=1, sequenceid=125, filesize=4.9 K 2024-11-19T01:11:58,554 INFO [M:0;5134ffc85563:34829 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=125, compaction requested=false 2024-11-19T01:11:58,556 INFO [M:0;5134ffc85563:34829 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:11:58,556 DEBUG [M:0;5134ffc85563:34829 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978718399Disabling compacts and flushes for region at 1731978718399Disabling writes for close at 1731978718399Obtaining lock to block concurrent updates at 1731978718399Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731978718399Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731978718400 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731978718400Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731978718401 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731978718420 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731978718420Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731978718432 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731978718451 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731978718451Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731978718466 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731978718483 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731978718483Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731978718498 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731978718514 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731978718514Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42475339: reopening flushed file at 1731978718529 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15c0ef60: reopening flushed file at 1731978718535 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3284e19e: reopening flushed file at 1731978718540 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b038ede: reopening flushed file at 1731978718547 (+7 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=125, compaction requested=false at 1731978718554 (+7 ms)Writing region close event to WAL at 1731978718556 (+2 ms)Closed at 1731978718556 2024-11-19T01:11:58,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:11:58,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741830_1006 (size=61332) 2024-11-19T01:11:58,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741830_1006 (size=61332) 2024-11-19T01:11:58,561 INFO [M:0;5134ffc85563:34829 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T01:11:58,561 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:11:58,561 INFO [M:0;5134ffc85563:34829 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34829 2024-11-19T01:11:58,562 INFO [M:0;5134ffc85563:34829 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:11:58,664 INFO [M:0;5134ffc85563:34829 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:11:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34829-0x101088c23880000, quorum=127.0.0.1:54802, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:11:58,693 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d5c4bd3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:58,693 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@811037{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:11:58,693 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:11:58,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fe51576{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:11:58,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cfa2328{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir/,STOPPED} 2024-11-19T01:11:58,698 WARN [BP-1228638944-172.17.0.2-1731978668799 heartbeating to localhost/127.0.0.1:41945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:11:58,698 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:11:58,698 WARN [BP-1228638944-172.17.0.2-1731978668799 heartbeating to localhost/127.0.0.1:41945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1228638944-172.17.0.2-1731978668799 (Datanode Uuid 085e1805-c6d5-46b7-980a-7386f7210858) service to localhost/127.0.0.1:41945 2024-11-19T01:11:58,698 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:11:58,699 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data3/current/BP-1228638944-172.17.0.2-1731978668799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:58,699 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data4/current/BP-1228638944-172.17.0.2-1731978668799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:58,699 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:11:58,718 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3def21d3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:58,718 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41c3a275{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:11:58,719 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:11:58,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aa952ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:11:58,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34e466bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir/,STOPPED} 2024-11-19T01:11:58,721 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T01:11:58,721 WARN [BP-1228638944-172.17.0.2-1731978668799 heartbeating to localhost/127.0.0.1:41945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T01:11:58,721 WARN [BP-1228638944-172.17.0.2-1731978668799 heartbeating to localhost/127.0.0.1:41945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1228638944-172.17.0.2-1731978668799 (Datanode Uuid 57e639d9-7703-41ef-a4b2-47a5286afc67) service to localhost/127.0.0.1:41945 2024-11-19T01:11:58,721 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T01:11:58,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data1/current/BP-1228638944-172.17.0.2-1731978668799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:58,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/cluster_63a32551-dc33-d7d3-13c7-d1f84bc8f7f8/data/data2/current/BP-1228638944-172.17.0.2-1731978668799 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T01:11:58,722 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T01:11:58,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f05aefd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:11:58,738 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@282647c0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T01:11:58,738 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T01:11:58,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ffc76d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T01:11:58,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47a053ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir/,STOPPED} 2024-11-19T01:11:58,749 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T01:11:58,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T01:11:58,811 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 205) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41945 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41945 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41945 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41945 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41945 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41945 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41945 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41945 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=337 (was 316) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3648 (was 3705) 2024-11-19T01:11:58,822 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=337, ProcessCount=11, AvailableMemoryMB=3647 2024-11-19T01:11:58,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.log.dir so I do NOT create it in target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77544ff5-c186-8dda-9d76-e8472c0b5393/hadoop.tmp.dir so I do NOT create it in target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d, deleteOnExit=true 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/test.cache.data in system properties and HBase conf 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.log.dir in system properties and HBase conf 2024-11-19T01:11:58,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T01:11:58,824 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T01:11:58,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:11:58,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T01:11:58,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/nfs.dump.dir in system properties and HBase conf 2024-11-19T01:11:58,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/java.io.tmpdir in system properties and HBase conf 2024-11-19T01:11:58,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T01:11:58,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T01:11:58,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T01:11:58,843 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:11:58,929 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:58,934 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:11:58,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:11:58,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:11:58,946 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:11:58,952 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:58,953 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ee125cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:11:58,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72d15c59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:11:59,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@470d812e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/java.io.tmpdir/jetty-localhost-46347-hadoop-hdfs-3_4_1-tests_jar-_-any-7643522679170376478/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T01:11:59,090 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a6fb517{HTTP/1.1, (http/1.1)}{localhost:46347} 2024-11-19T01:11:59,090 INFO [Time-limited test {}] server.Server(415): Started @290386ms 2024-11-19T01:11:59,104 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T01:11:59,189 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:59,196 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:11:59,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:11:59,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:11:59,205 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T01:11:59,207 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d148abe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:11:59,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@717a950c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:11:59,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e58a9be{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/java.io.tmpdir/jetty-localhost-35299-hadoop-hdfs-3_4_1-tests_jar-_-any-17689867728148379495/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:59,352 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3910812a{HTTP/1.1, (http/1.1)}{localhost:35299} 2024-11-19T01:11:59,352 INFO [Time-limited test {}] server.Server(415): Started @290648ms 2024-11-19T01:11:59,353 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T01:11:59,414 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T01:11:59,417 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T01:11:59,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T01:11:59,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T01:11:59,422 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T01:11:59,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2578bc63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.log.dir/,AVAILABLE} 2024-11-19T01:11:59,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e44754{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T01:11:59,458 WARN [Thread-2466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data1/current/BP-1348549008-172.17.0.2-1731978718850/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:59,458 WARN [Thread-2467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data2/current/BP-1348549008-172.17.0.2-1731978718850/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:59,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:59,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:11:59,497 WARN [Thread-2445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T01:11:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d303b119a4892e with lease ID 0x52a42f6932dbedd6: Processing first storage report for DS-d7db0a30-0823-4b92-9aed-89fd83f8747d from datanode DatanodeRegistration(127.0.0.1:35247, datanodeUuid=75d63ec3-eb6e-4885-84e6-42e4823b2c8f, infoPort=41895, infoSecurePort=0, ipcPort=42939, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850) 2024-11-19T01:11:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d303b119a4892e with lease ID 0x52a42f6932dbedd6: from storage DS-d7db0a30-0823-4b92-9aed-89fd83f8747d node DatanodeRegistration(127.0.0.1:35247, datanodeUuid=75d63ec3-eb6e-4885-84e6-42e4823b2c8f, infoPort=41895, infoSecurePort=0, ipcPort=42939, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7d303b119a4892e with lease ID 0x52a42f6932dbedd6: Processing first storage report for DS-93d1e303-b20b-449b-ae9f-209f1e6e0012 from datanode DatanodeRegistration(127.0.0.1:35247, datanodeUuid=75d63ec3-eb6e-4885-84e6-42e4823b2c8f, infoPort=41895, infoSecurePort=0, ipcPort=42939, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850) 2024-11-19T01:11:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d303b119a4892e with lease ID 0x52a42f6932dbedd6: from storage DS-93d1e303-b20b-449b-ae9f-209f1e6e0012 node DatanodeRegistration(127.0.0.1:35247, datanodeUuid=75d63ec3-eb6e-4885-84e6-42e4823b2c8f, infoPort=41895, infoSecurePort=0, ipcPort=42939, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:59,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b0e389f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/java.io.tmpdir/jetty-localhost-37007-hadoop-hdfs-3_4_1-tests_jar-_-any-6428016891794333014/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T01:11:59,561 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56063e0c{HTTP/1.1, (http/1.1)}{localhost:37007} 2024-11-19T01:11:59,561 INFO [Time-limited test {}] server.Server(415): Started @290857ms 2024-11-19T01:11:59,562 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
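The AuthenticationFilter and DirectoryScanner warnings above are configuration fallbacks rather than failures: the signature secret file at /home/jenkins/hadoop-http-auth-signature-secret is unreadable, so random secrets are used, and the directory-scanner throttle is reset to -1 because it was set above 1000 ms/sec. Below is a minimal Java sketch of how a test setup could pre-seed both settings before starting the mini-cluster; the class name and secret-file path are hypothetical, the throttle key is quoted from the log, and the signature-secret key is the standard Hadoop HTTP authentication property (an assumption here, not quoted from this log).

import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.conf.Configuration;

// Hedged sketch: pre-seed the settings behind the two warnings above.
// The secret-file location is a placeholder; any readable file works for tests.
public class MiniClusterConfSketch {
  public static Configuration quietConf() throws Exception {
    Path secret = Files.createTempFile("http-auth-signature", ".secret");
    Files.writeString(secret, "test-only-secret");

    Configuration conf = new Configuration();
    // Gives AuthenticationFilter a readable signature secret file so it does not
    // fall back to random secrets (assumed property name from Hadoop HTTP auth docs).
    conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
    // Keeps the DirectoryScanner throttle at or below 1000 ms/sec so it is not
    // reset to the default of -1 with a warning.
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
    return conf;
  }
}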
2024-11-19T01:11:59,685 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data3/current/BP-1348549008-172.17.0.2-1731978718850/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:59,686 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data4/current/BP-1348549008-172.17.0.2-1731978718850/current, will proceed with Du for space computation calculation, 2024-11-19T01:11:59,749 WARN [Thread-2481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T01:11:59,755 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d97ae7d964978f0 with lease ID 0x52a42f6932dbedd7: Processing first storage report for DS-367faadf-341f-4f24-a8e1-b0b259a73908 from datanode DatanodeRegistration(127.0.0.1:41825, datanodeUuid=0168b7ac-b570-4fbb-9c05-4eedfa4b73a7, infoPort=38359, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850) 2024-11-19T01:11:59,755 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d97ae7d964978f0 with lease ID 0x52a42f6932dbedd7: from storage DS-367faadf-341f-4f24-a8e1-b0b259a73908 node DatanodeRegistration(127.0.0.1:41825, datanodeUuid=0168b7ac-b570-4fbb-9c05-4eedfa4b73a7, infoPort=38359, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:59,755 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d97ae7d964978f0 with lease ID 0x52a42f6932dbedd7: Processing first storage report for DS-db1563eb-753a-4d8e-a434-c0787f61e773 from datanode DatanodeRegistration(127.0.0.1:41825, datanodeUuid=0168b7ac-b570-4fbb-9c05-4eedfa4b73a7, infoPort=38359, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850) 2024-11-19T01:11:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d97ae7d964978f0 with lease ID 0x52a42f6932dbedd7: from storage DS-db1563eb-753a-4d8e-a434-c0787f61e773 node DatanodeRegistration(127.0.0.1:41825, datanodeUuid=0168b7ac-b570-4fbb-9c05-4eedfa4b73a7, infoPort=38359, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1767442204;c=1731978718850), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T01:11:59,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b 2024-11-19T01:11:59,846 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/zookeeper_0, clientPort=57881, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T01:11:59,854 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57881 2024-11-19T01:11:59,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:59,856 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:59,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:11:59,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741825_1001 (size=7) 2024-11-19T01:11:59,879 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4 with version=8 2024-11-19T01:11:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39161/user/jenkins/test-data/e8586cb2-bede-9a38-3970-d01d273eaf38/hbase-staging 2024-11-19T01:11:59,882 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:11:59,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:59,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:59,882 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:11:59,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:59,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:11:59,882 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T01:11:59,882 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:11:59,883 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41123 2024-11-19T01:11:59,884 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41123 connecting to ZooKeeper ensemble=127.0.0.1:57881 2024-11-19T01:11:59,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:411230x0, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:11:59,897 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41123-0x101088ce6fa0000 connected 2024-11-19T01:11:59,925 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:59,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:59,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:59,929 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4, hbase.cluster.distributed=false 2024-11-19T01:11:59,931 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:11:59,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41123 2024-11-19T01:11:59,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41123 2024-11-19T01:11:59,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41123 2024-11-19T01:11:59,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41123 2024-11-19T01:11:59,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41123 2024-11-19T01:11:59,975 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5134ffc85563:0 server-side Connection retries=45 2024-11-19T01:11:59,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:59,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:59,976 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T01:11:59,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T01:11:59,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T01:11:59,976 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T01:11:59,976 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T01:11:59,977 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44605 2024-11-19T01:11:59,979 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44605 connecting to ZooKeeper ensemble=127.0.0.1:57881 2024-11-19T01:11:59,980 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:59,982 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:11:59,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446050x0, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T01:11:59,998 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446050x0, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:11:59,998 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44605-0x101088ce6fa0001 connected 2024-11-19T01:11:59,998 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T01:12:00,005 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T01:12:00,006 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T01:12:00,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T01:12:00,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44605 2024-11-19T01:12:00,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44605 2024-11-19T01:12:00,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44605 2024-11-19T01:12:00,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44605 2024-11-19T01:12:00,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44605 2024-11-19T01:12:00,039 
DEBUG [M:0;5134ffc85563:41123 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5134ffc85563:41123 2024-11-19T01:12:00,041 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5134ffc85563,41123,1731978719881 2024-11-19T01:12:00,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:12:00,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:12:00,045 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5134ffc85563,41123,1731978719881 2024-11-19T01:12:00,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T01:12:00,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,047 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T01:12:00,048 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5134ffc85563,41123,1731978719881 from backup master directory 2024-11-19T01:12:00,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5134ffc85563,41123,1731978719881 2024-11-19T01:12:00,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:12:00,049 WARN [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
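Several ZKUtil/ZKWatcher lines above note "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master. A minimal sketch of that exists-with-watch pattern using the plain ZooKeeper client follows; the class and method names are hypothetical and the session timeout is illustrative, but the API calls are the standard org.apache.zookeeper ones.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Hedged sketch of the exists-with-watch pattern the ZKUtil lines above describe:
// a watch can be registered on a znode such as /hbase/running before it exists,
// and a NodeCreated event fires once the active master creates it.
public class RunningZNodeWatchSketch {
  public static void watchRunning(String quorum) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("ZK event: " + event.getType() + " on " + event.getPath());
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
    // Returns null when the znode is absent, but still registers the watch
    // with the session's default watcher; keep the handle open until the event arrives.
    Stat stat = zk.exists("/hbase/running", true);
    System.out.println("/hbase/running exists now? " + (stat != null));
  }
}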
2024-11-19T01:12:00,050 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5134ffc85563,41123,1731978719881 2024-11-19T01:12:00,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T01:12:00,060 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/hbase.id] with ID: 32a26cdf-673b-42ba-8383-9e8551bb089f 2024-11-19T01:12:00,060 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/.tmp/hbase.id 2024-11-19T01:12:00,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:12:00,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741826_1002 (size=42) 2024-11-19T01:12:00,075 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/.tmp/hbase.id]:[hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/hbase.id] 2024-11-19T01:12:00,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:12:00,089 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T01:12:00,091 INFO [master/5134ffc85563:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
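The FSUtils lines just above describe writing the cluster ID to a temporary location under .tmp and then moving it to hbase.id. The following is a minimal sketch of that write-temp-then-rename pattern against the Hadoop FileSystem API, with a placeholder root directory and ID rather than the values from this run.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hedged sketch of the write-to-temp-then-rename step logged above for hbase.id.
// Root directory and ID are placeholders, not the values from this run.
public class ClusterIdWriteSketch {
  public static void writeClusterId(Configuration conf, Path rootDir, String id)
      throws Exception {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path idFile = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(id.getBytes(StandardCharsets.UTF_8));
    }
    // Rename within HDFS is atomic, so readers never observe a partially written file.
    if (!fs.rename(tmp, idFile)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + idFile);
    }
  }
}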
2024-11-19T01:12:00,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:12:00,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741827_1003 (size=196) 2024-11-19T01:12:00,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T01:12:00,113 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T01:12:00,113 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:12:00,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:12:00,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741828_1004 (size=1189) 2024-11-19T01:12:00,137 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store 2024-11-19T01:12:00,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:12:00,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741829_1005 (size=34) 2024-11-19T01:12:00,151 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:12:00,151 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:12:00,151 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:12:00,151 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:12:00,151 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:12:00,151 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:12:00,151 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
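The master:store descriptor dumped above spells out per-family attributes (VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER, BLOCKSIZE). For illustration only, here is a minimal sketch of an equivalent 'info' family built with the public ColumnFamilyDescriptorBuilder API; the master region assembles its descriptor internally, so this is not the code that produced the log line.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: the 'info' family attributes from the descriptor above,
// expressed with the public builder API (illustration only).
public class InfoFamilySketch {
  public static ColumnFamilyDescriptor infoFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8 KB
        .build();
  }
}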
2024-11-19T01:12:00,151 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978720151Disabling compacts and flushes for region at 1731978720151Disabling writes for close at 1731978720151Writing region close event to WAL at 1731978720151Closed at 1731978720151 2024-11-19T01:12:00,152 WARN [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/.initializing 2024-11-19T01:12:00,152 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/WALs/5134ffc85563,41123,1731978719881 2024-11-19T01:12:00,155 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C41123%2C1731978719881, suffix=, logDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/WALs/5134ffc85563,41123,1731978719881, archiveDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/oldWALs, maxLogs=10 2024-11-19T01:12:00,155 INFO [master/5134ffc85563:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C41123%2C1731978719881.1731978720155 2024-11-19T01:12:00,169 INFO [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/WALs/5134ffc85563,41123,1731978719881/5134ffc85563%2C41123%2C1731978719881.1731978720155 2024-11-19T01:12:00,175 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38359:38359),(127.0.0.1/127.0.0.1:41895:41895)] 2024-11-19T01:12:00,181 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:12:00,182 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:12:00,182 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,182 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T01:12:00,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:00,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T01:12:00,192 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:12:00,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T01:12:00,195 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:12:00,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T01:12:00,197 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T01:12:00,199 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,200 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,200 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,201 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,201 DEBUG [master/5134ffc85563:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,202 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T01:12:00,204 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T01:12:00,210 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:12:00,211 INFO [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805148, jitterRate=0.023799240589141846}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T01:12:00,212 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731978720182Initializing all the Stores at 1731978720183 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978720183Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978720188 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978720188Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978720188Cleaning up temporary data from old regions at 1731978720201 (+13 ms)Region opened successfully at 1731978720212 (+11 ms) 2024-11-19T01:12:00,212 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T01:12:00,216 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@80c9392, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:12:00,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T01:12:00,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T01:12:00,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T01:12:00,217 INFO [master/5134ffc85563:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T01:12:00,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T01:12:00,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T01:12:00,218 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T01:12:00,227 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T01:12:00,228 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T01:12:00,230 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T01:12:00,230 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T01:12:00,231 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T01:12:00,232 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T01:12:00,233 INFO [master/5134ffc85563:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T01:12:00,234 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T01:12:00,235 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T01:12:00,236 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T01:12:00,237 DEBUG 
[master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T01:12:00,240 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T01:12:00,241 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T01:12:00,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:12:00,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T01:12:00,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,243 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5134ffc85563,41123,1731978719881, sessionid=0x101088ce6fa0000, setting cluster-up flag (Was=false) 2024-11-19T01:12:00,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,253 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T01:12:00,255 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,41123,1731978719881 2024-11-19T01:12:00,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,267 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T01:12:00,269 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5134ffc85563,41123,1731978719881 2024-11-19T01:12:00,271 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T01:12:00,272 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T01:12:00,273 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T01:12:00,273 INFO [master/5134ffc85563:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T01:12:00,273 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5134ffc85563,41123,1731978719881 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5134ffc85563:0, corePoolSize=5, maxPoolSize=5 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5134ffc85563:0, corePoolSize=10, maxPoolSize=10 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:12:00,275 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5134ffc85563:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T01:12:00,286 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:12:00,287 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T01:12:00,288 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,288 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T01:12:00,305 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731978750305 2024-11-19T01:12:00,306 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T01:12:00,306 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T01:12:00,306 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T01:12:00,306 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T01:12:00,306 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T01:12:00,306 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T01:12:00,314 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,321 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T01:12:00,321 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T01:12:00,321 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T01:12:00,324 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T01:12:00,324 INFO [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T01:12:00,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:12:00,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741831_1007 (size=1321) 2024-11-19T01:12:00,335 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T01:12:00,336 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4 2024-11-19T01:12:00,341 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large 
file=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978720324,5,FailOnTimeoutGroup] 2024-11-19T01:12:00,341 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978720341,5,FailOnTimeoutGroup] 2024-11-19T01:12:00,341 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,341 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T01:12:00,341 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,341 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,349 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(746): ClusterId : 32a26cdf-673b-42ba-8383-9e8551bb089f 2024-11-19T01:12:00,349 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T01:12:00,352 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T01:12:00,352 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T01:12:00,355 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T01:12:00,355 DEBUG [RS:0;5134ffc85563:44605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e490a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5134ffc85563/172.17.0.2:0 2024-11-19T01:12:00,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:12:00,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741832_1008 (size=32) 2024-11-19T01:12:00,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:12:00,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:12:00,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:12:00,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:00,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:12:00,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:12:00,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:00,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:12:00,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:12:00,371 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:00,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:12:00,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:12:00,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:00,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:00,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:12:00,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740 2024-11-19T01:12:00,375 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740 2024-11-19T01:12:00,376 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5134ffc85563:44605 2024-11-19T01:12:00,376 INFO [RS:0;5134ffc85563:44605 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T01:12:00,376 INFO [RS:0;5134ffc85563:44605 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T01:12:00,376 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T01:12:00,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:12:00,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:12:00,379 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:12:00,379 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(2659): reportForDuty to master=5134ffc85563,41123,1731978719881 with port=44605, startcode=1731978719975 2024-11-19T01:12:00,379 DEBUG [RS:0;5134ffc85563:44605 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T01:12:00,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:12:00,406 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T01:12:00,407 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856708, jitterRate=0.08936107158660889}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:12:00,407 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40453, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T01:12:00,407 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731978720359Initializing all the Stores at 1731978720360 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978720360Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978720365 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978720365Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978720365Cleaning up temporary data from old regions at 1731978720377 (+12 
ms)Region opened successfully at 1731978720407 (+30 ms) 2024-11-19T01:12:00,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:12:00,408 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:12:00,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:12:00,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:12:00,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:12:00,408 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41123 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5134ffc85563,44605,1731978719975 2024-11-19T01:12:00,408 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41123 {}] master.ServerManager(517): Registering regionserver=5134ffc85563,44605,1731978719975 2024-11-19T01:12:00,409 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:12:00,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978720408Disabling compacts and flushes for region at 1731978720408Disabling writes for close at 1731978720408Writing region close event to WAL at 1731978720409 (+1 ms)Closed at 1731978720409 2024-11-19T01:12:00,410 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4 2024-11-19T01:12:00,410 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42207 2024-11-19T01:12:00,410 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T01:12:00,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:12:00,413 DEBUG [RS:0;5134ffc85563:44605 {}] zookeeper.ZKUtil(111): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5134ffc85563,44605,1731978719975 2024-11-19T01:12:00,413 WARN [RS:0;5134ffc85563:44605 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T01:12:00,413 INFO [RS:0;5134ffc85563:44605 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:12:00,413 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/5134ffc85563,44605,1731978719975 2024-11-19T01:12:00,414 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:12:00,414 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5134ffc85563,44605,1731978719975] 2024-11-19T01:12:00,414 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T01:12:00,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T01:12:00,416 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:12:00,417 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T01:12:00,426 INFO [RS:0;5134ffc85563:44605 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T01:12:00,430 INFO [RS:0;5134ffc85563:44605 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T01:12:00,431 INFO [RS:0;5134ffc85563:44605 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T01:12:00,431 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,431 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T01:12:00,432 INFO [RS:0;5134ffc85563:44605 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T01:12:00,432 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5134ffc85563:0, corePoolSize=2, maxPoolSize=2 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,432 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,433 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5134ffc85563:0, corePoolSize=1, maxPoolSize=1 2024-11-19T01:12:00,433 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:12:00,433 DEBUG [RS:0;5134ffc85563:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5134ffc85563:0, corePoolSize=3, maxPoolSize=3 2024-11-19T01:12:00,449 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,449 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,449 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,449 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-19T01:12:00,449 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,449 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,44605,1731978719975-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:12:00,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:12:00,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:12:00,474 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T01:12:00,474 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,44605,1731978719975-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,474 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:00,474 INFO [RS:0;5134ffc85563:44605 {}] regionserver.Replication(171): 5134ffc85563,44605,1731978719975 started 2024-11-19T01:12:00,493 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:12:00,493 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1482): Serving as 5134ffc85563,44605,1731978719975, RpcServer on 5134ffc85563/172.17.0.2:44605, sessionid=0x101088ce6fa0001 2024-11-19T01:12:00,493 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T01:12:00,493 DEBUG [RS:0;5134ffc85563:44605 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5134ffc85563,44605,1731978719975 2024-11-19T01:12:00,493 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,44605,1731978719975' 2024-11-19T01:12:00,493 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T01:12:00,495 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T01:12:00,496 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T01:12:00,496 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T01:12:00,496 DEBUG [RS:0;5134ffc85563:44605 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5134ffc85563,44605,1731978719975 2024-11-19T01:12:00,496 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5134ffc85563,44605,1731978719975' 2024-11-19T01:12:00,496 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T01:12:00,497 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T01:12:00,497 DEBUG [RS:0;5134ffc85563:44605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T01:12:00,497 INFO [RS:0;5134ffc85563:44605 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T01:12:00,497 INFO [RS:0;5134ffc85563:44605 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T01:12:00,567 WARN [5134ffc85563:41123 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-19T01:12:00,600 INFO [RS:0;5134ffc85563:44605 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C44605%2C1731978719975, suffix=, logDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/5134ffc85563,44605,1731978719975, archiveDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/oldWALs, maxLogs=32 2024-11-19T01:12:00,600 INFO [RS:0;5134ffc85563:44605 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C44605%2C1731978719975.1731978720600 2024-11-19T01:12:00,622 INFO [RS:0;5134ffc85563:44605 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/5134ffc85563,44605,1731978719975/5134ffc85563%2C44605%2C1731978719975.1731978720600 2024-11-19T01:12:00,627 DEBUG [RS:0;5134ffc85563:44605 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41895:41895),(127.0.0.1/127.0.0.1:38359:38359)] 2024-11-19T01:12:00,818 DEBUG [5134ffc85563:41123 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T01:12:00,818 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5134ffc85563,44605,1731978719975 2024-11-19T01:12:00,820 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,44605,1731978719975, state=OPENING 2024-11-19T01:12:00,821 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T01:12:00,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:00,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:12:00,824 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T01:12:00,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:12:00,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,44605,1731978719975}] 2024-11-19T01:12:00,978 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T01:12:00,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49281, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T01:12:00,987 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T01:12:00,987 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:12:00,990 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5134ffc85563%2C44605%2C1731978719975.meta, suffix=.meta, logDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/5134ffc85563,44605,1731978719975, archiveDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/oldWALs, maxLogs=32 2024-11-19T01:12:00,992 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5134ffc85563%2C44605%2C1731978719975.meta.1731978720991.meta 2024-11-19T01:12:01,027 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/5134ffc85563,44605,1731978719975/5134ffc85563%2C44605%2C1731978719975.meta.1731978720991.meta 2024-11-19T01:12:01,037 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41895:41895),(127.0.0.1/127.0.0.1:38359:38359)] 2024-11-19T01:12:01,045 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T01:12:01,046 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T01:12:01,046 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T01:12:01,046 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T01:12:01,046 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T01:12:01,046 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T01:12:01,046 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T01:12:01,046 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T01:12:01,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T01:12:01,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T01:12:01,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:01,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:01,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T01:12:01,050 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T01:12:01,050 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:01,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:01,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T01:12:01,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T01:12:01,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:01,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T01:12:01,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T01:12:01,053 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T01:12:01,053 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T01:12:01,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T01:12:01,053 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T01:12:01,055 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740 2024-11-19T01:12:01,057 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740 2024-11-19T01:12:01,060 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T01:12:01,060 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T01:12:01,061 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T01:12:01,062 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T01:12:01,063 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729650, jitterRate=-0.07220260798931122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T01:12:01,063 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T01:12:01,064 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731978721046Writing region info on filesystem at 1731978721046Initializing all the Stores at 1731978721048 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978721048Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978721048Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731978721048Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731978721048Cleaning up temporary data from old regions at 1731978721060 (+12 ms)Running coprocessor post-open hooks at 1731978721063 (+3 ms)Region opened successfully at 1731978721064 (+1 ms) 2024-11-19T01:12:01,066 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731978720977 2024-11-19T01:12:01,070 DEBUG [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T01:12:01,070 INFO [RS_OPEN_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T01:12:01,070 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5134ffc85563,44605,1731978719975 2024-11-19T01:12:01,071 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5134ffc85563,44605,1731978719975, state=OPEN 2024-11-19T01:12:01,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:12:01,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T01:12:01,076 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5134ffc85563,44605,1731978719975 2024-11-19T01:12:01,076 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:12:01,076 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T01:12:01,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T01:12:01,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5134ffc85563,44605,1731978719975 in 252 msec 2024-11-19T01:12:01,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T01:12:01,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 667 msec 2024-11-19T01:12:01,085 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T01:12:01,085 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T01:12:01,089 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:12:01,089 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,44605,1731978719975, seqNum=-1] 2024-11-19T01:12:01,089 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:12:01,091 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44821, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:12:01,100 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 826 msec 2024-11-19T01:12:01,100 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731978721100, completionTime=-1 2024-11-19T01:12:01,101 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T01:12:01,101 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T01:12:01,104 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T01:12:01,104 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731978781104 2024-11-19T01:12:01,105 INFO [master/5134ffc85563:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731978841104 2024-11-19T01:12:01,105 INFO [master/5134ffc85563:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-11-19T01:12:01,106 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41123,1731978719881-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:01,106 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41123,1731978719881-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:01,106 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41123,1731978719881-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:01,106 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5134ffc85563:41123, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:12:01,106 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:01,106 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T01:12:01,108 DEBUG [master/5134ffc85563:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.062sec 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41123,1731978719881-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T01:12:01,112 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41123,1731978719881-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T01:12:01,121 DEBUG [master/5134ffc85563:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T01:12:01,121 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T01:12:01,121 INFO [master/5134ffc85563:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5134ffc85563,41123,1731978719881-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T01:12:01,149 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f4fc7f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:12:01,149 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5134ffc85563,41123,-1 for getting cluster id 2024-11-19T01:12:01,150 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T01:12:01,151 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '32a26cdf-673b-42ba-8383-9e8551bb089f' 2024-11-19T01:12:01,152 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T01:12:01,152 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "32a26cdf-673b-42ba-8383-9e8551bb089f" 2024-11-19T01:12:01,152 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b988b68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:12:01,152 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5134ffc85563,41123,-1] 2024-11-19T01:12:01,153 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T01:12:01,153 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:12:01,154 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T01:12:01,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@371fd414, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T01:12:01,155 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T01:12:01,156 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5134ffc85563,44605,1731978719975, seqNum=-1] 2024-11-19T01:12:01,157 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T01:12:01,158 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T01:12:01,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5134ffc85563,41123,1731978719881 2024-11-19T01:12:01,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T01:12:01,162 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T01:12:01,163 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T01:12:01,165 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/oldWALs, maxLogs=32 2024-11-19T01:12:01,165 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731978721165 2024-11-19T01:12:01,175 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731978721165 2024-11-19T01:12:01,188 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38359:38359),(127.0.0.1/127.0.0.1:41895:41895)] 2024-11-19T01:12:01,201 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731978721201 2024-11-19T01:12:01,221 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,221 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,221 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,221 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,221 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,222 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731978721165 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731978721201 2024-11-19T01:12:01,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741835_1011 (size=93) 2024-11-19T01:12:01,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741835_1011 (size=93) 2024-11-19T01:12:01,235 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38359:38359),(127.0.0.1/127.0.0.1:41895:41895)] 2024-11-19T01:12:01,236 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731978721165 to hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/oldWALs/test.com%2C8080%2C1.1731978721165 2024-11-19T01:12:01,237 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,238 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,238 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35247 is added to blk_1073741836_1012 (size=93) 2024-11-19T01:12:01,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741836_1012 (size=93) 2024-11-19T01:12:01,242 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/oldWALs 2024-11-19T01:12:01,242 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731978721201) 2024-11-19T01:12:01,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T01:12:01,242 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T01:12:01,242 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:12:01,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:12:01,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:12:01,242 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T01:12:01,242 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1860903685, stopped=false 2024-11-19T01:12:01,242 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5134ffc85563,41123,1731978719881 2024-11-19T01:12:01,242 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T01:12:01,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:12:01,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T01:12:01,244 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:12:01,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:01,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:01,245 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T01:12:01,245 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:12:01,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:12:01,245 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5134ffc85563,44605,1731978719975' ***** 2024-11-19T01:12:01,246 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T01:12:01,246 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:12:01,246 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T01:12:01,246 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T01:12:01,246 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T01:12:01,246 INFO [RS:0;5134ffc85563:44605 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T01:12:01,246 INFO [RS:0;5134ffc85563:44605 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T01:12:01,246 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(959): stopping server 5134ffc85563,44605,1731978719975 2024-11-19T01:12:01,246 INFO [RS:0;5134ffc85563:44605 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:12:01,246 INFO [RS:0;5134ffc85563:44605 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5134ffc85563:44605. 2024-11-19T01:12:01,246 DEBUG [RS:0;5134ffc85563:44605 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T01:12:01,246 DEBUG [RS:0;5134ffc85563:44605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:12:01,246 INFO [RS:0;5134ffc85563:44605 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T01:12:01,249 INFO [RS:0;5134ffc85563:44605 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T01:12:01,249 INFO [RS:0;5134ffc85563:44605 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T01:12:01,249 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T01:12:01,249 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T01:12:01,249 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T01:12:01,249 DEBUG [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T01:12:01,249 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T01:12:01,249 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T01:12:01,249 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T01:12:01,249 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T01:12:01,249 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T01:12:01,250 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T01:12:01,271 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740/.tmp/ns/2ea33a46a37e4086bf40fe11df588aff is 43, key is default/ns:d/1731978721092/Put/seqid=0 2024-11-19T01:12:01,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741837_1013 (size=5153) 2024-11-19T01:12:01,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741837_1013 (size=5153) 2024-11-19T01:12:01,279 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740/.tmp/ns/2ea33a46a37e4086bf40fe11df588aff 2024-11-19T01:12:01,286 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740/.tmp/ns/2ea33a46a37e4086bf40fe11df588aff as hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740/ns/2ea33a46a37e4086bf40fe11df588aff 2024-11-19T01:12:01,290 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740/ns/2ea33a46a37e4086bf40fe11df588aff, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T01:12:01,292 INFO 
[RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-19T01:12:01,292 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T01:12:01,297 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T01:12:01,298 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T01:12:01,299 INFO [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T01:12:01,299 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731978721249Running coprocessor pre-close hooks at 1731978721249Disabling compacts and flushes for region at 1731978721249Disabling writes for close at 1731978721249Obtaining lock to block concurrent updates at 1731978721250 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731978721250Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731978721250Flushing stores of hbase:meta,,1.1588230740 at 1731978721251 (+1 ms)Flushing 1588230740/ns: creating writer at 1731978721251Flushing 1588230740/ns: appending metadata at 1731978721271 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731978721271Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27f150b6: reopening flushed file at 1731978721285 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1731978721292 (+7 ms)Writing region close event to WAL at 1731978721293 (+1 ms)Running coprocessor post-close hooks at 1731978721298 (+5 ms)Closed at 1731978721299 (+1 ms) 2024-11-19T01:12:01,299 DEBUG [RS_CLOSE_META-regionserver/5134ffc85563:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T01:12:01,449 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(976): stopping server 5134ffc85563,44605,1731978719975; all regions closed. 
2024-11-19T01:12:01,450 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T01:12:01,450 INFO [regionserver/5134ffc85563:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T01:12:01,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,450 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,450 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,450 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,450 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741834_1010 (size=1152) 2024-11-19T01:12:01,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741834_1010 (size=1152) 2024-11-19T01:12:01,455 DEBUG [RS:0;5134ffc85563:44605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/oldWALs 2024-11-19T01:12:01,455 INFO [RS:0;5134ffc85563:44605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C44605%2C1731978719975.meta:.meta(num 1731978720991) 2024-11-19T01:12:01,456 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,456 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T01:12:01,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741833_1009 (size=93) 2024-11-19T01:12:01,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741833_1009 (size=93) 2024-11-19T01:12:01,461 DEBUG [RS:0;5134ffc85563:44605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/oldWALs 2024-11-19T01:12:01,461 INFO [RS:0;5134ffc85563:44605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5134ffc85563%2C44605%2C1731978719975:(num 1731978720600) 2024-11-19T01:12:01,461 DEBUG [RS:0;5134ffc85563:44605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T01:12:01,461 INFO [RS:0;5134ffc85563:44605 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T01:12:01,461 INFO [RS:0;5134ffc85563:44605 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:12:01,462 INFO [RS:0;5134ffc85563:44605 {}] hbase.ChoreService(370): Chore service for: regionserver/5134ffc85563:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T01:12:01,462 INFO [RS:0;5134ffc85563:44605 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:12:01,462 INFO [regionserver/5134ffc85563:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T01:12:01,462 INFO [RS:0;5134ffc85563:44605 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44605 2024-11-19T01:12:01,464 INFO [RS:0;5134ffc85563:44605 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T01:12:01,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T01:12:01,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5134ffc85563,44605,1731978719975 2024-11-19T01:12:01,465 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5134ffc85563,44605,1731978719975] 2024-11-19T01:12:01,467 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5134ffc85563,44605,1731978719975 already deleted, retry=false 2024-11-19T01:12:01,467 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5134ffc85563,44605,1731978719975 expired; onlineServers=0 2024-11-19T01:12:01,467 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5134ffc85563,41123,1731978719881' ***** 2024-11-19T01:12:01,467 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T01:12:01,467 INFO [M:0;5134ffc85563:41123 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T01:12:01,467 INFO [M:0;5134ffc85563:41123 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T01:12:01,467 DEBUG [M:0;5134ffc85563:41123 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T01:12:01,467 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-19T01:12:01,467 DEBUG [M:0;5134ffc85563:41123 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T01:12:01,467 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978720324 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.large.0-1731978720324,5,FailOnTimeoutGroup] 2024-11-19T01:12:01,467 INFO [M:0;5134ffc85563:41123 {}] hbase.ChoreService(370): Chore service for: master/5134ffc85563:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T01:12:01,468 INFO [M:0;5134ffc85563:41123 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T01:12:01,468 DEBUG [M:0;5134ffc85563:41123 {}] master.HMaster(1795): Stopping service threads 2024-11-19T01:12:01,468 INFO [M:0;5134ffc85563:41123 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T01:12:01,468 INFO [M:0;5134ffc85563:41123 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T01:12:01,468 INFO [M:0;5134ffc85563:41123 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T01:12:01,468 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-19T01:12:01,467 DEBUG [master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978720341 {}] cleaner.HFileCleaner(306): Exit Thread[master/5134ffc85563:0:becomeActiveMaster-HFileCleaner.small.0-1731978720341,5,FailOnTimeoutGroup] 2024-11-19T01:12:01,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T01:12:01,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T01:12:01,469 DEBUG [M:0;5134ffc85563:41123 {}] zookeeper.ZKUtil(347): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T01:12:01,469 WARN [M:0;5134ffc85563:41123 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T01:12:01,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,46771,1731978533954/5134ffc85563%2C46771%2C1731978533954.meta.1731978534873.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T01:12:01,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40963/user/jenkins/test-data/3ca64890-7b89-2860-3e8d-48ea8649ee7c/WALs/5134ffc85563,43549,1731978535122/5134ffc85563%2C43549%2C1731978535122.1731978535398 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T01:12:01,470 INFO [M:0;5134ffc85563:41123 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/.lastflushedseqids 2024-11-19T01:12:01,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741838_1014 (size=99) 2024-11-19T01:12:01,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741838_1014 (size=99) 2024-11-19T01:12:01,485 INFO [M:0;5134ffc85563:41123 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T01:12:01,485 INFO [M:0;5134ffc85563:41123 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T01:12:01,485 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T01:12:01,486 INFO [M:0;5134ffc85563:41123 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:12:01,486 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:12:01,486 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T01:12:01,486 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T01:12:01,486 INFO [M:0;5134ffc85563:41123 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T01:12:01,507 DEBUG [M:0;5134ffc85563:41123 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9705321df8fd43a58e13a9cc69296920 is 82, key is hbase:meta,,1/info:regioninfo/1731978721070/Put/seqid=0 2024-11-19T01:12:01,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741839_1015 (size=5672) 2024-11-19T01:12:01,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741839_1015 (size=5672) 2024-11-19T01:12:01,520 INFO [M:0;5134ffc85563:41123 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9705321df8fd43a58e13a9cc69296920 2024-11-19T01:12:01,552 DEBUG [M:0;5134ffc85563:41123 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a640442e30a84bac872c326327cecb52 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731978721099/Put/seqid=0 2024-11-19T01:12:01,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741840_1016 (size=5275) 2024-11-19T01:12:01,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741840_1016 (size=5275) 2024-11-19T01:12:01,560 INFO [M:0;5134ffc85563:41123 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a640442e30a84bac872c326327cecb52 2024-11-19T01:12:01,566 INFO [RS:0;5134ffc85563:44605 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T01:12:01,566 INFO [RS:0;5134ffc85563:44605 {}] regionserver.HRegionServer(1031): Exiting; stopping=5134ffc85563,44605,1731978719975; zookeeper connection closed. 
2024-11-19T01:12:01,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:12:01,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x101088ce6fa0001, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T01:12:01,566 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@39d198a5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@39d198a5 2024-11-19T01:12:01,566 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T01:12:01,581 DEBUG [M:0;5134ffc85563:41123 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/399ad4034f3743fc83e06ec996ce7d29 is 69, key is 5134ffc85563,44605,1731978719975/rs:state/1731978720408/Put/seqid=0 2024-11-19T01:12:01,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741841_1017 (size=5156) 2024-11-19T01:12:01,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741841_1017 (size=5156) 2024-11-19T01:12:01,588 INFO [M:0;5134ffc85563:41123 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/399ad4034f3743fc83e06ec996ce7d29 2024-11-19T01:12:01,611 DEBUG [M:0;5134ffc85563:41123 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/15203cc1c7fd4a4aa498dd8d0635ee3d is 52, key is load_balancer_on/state:d/1731978721161/Put/seqid=0 2024-11-19T01:12:01,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741842_1018 (size=5056) 2024-11-19T01:12:01,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741842_1018 (size=5056) 2024-11-19T01:12:01,617 INFO [M:0;5134ffc85563:41123 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/15203cc1c7fd4a4aa498dd8d0635ee3d 2024-11-19T01:12:01,624 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9705321df8fd43a58e13a9cc69296920 as hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9705321df8fd43a58e13a9cc69296920 2024-11-19T01:12:01,630 INFO [M:0;5134ffc85563:41123 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9705321df8fd43a58e13a9cc69296920, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T01:12:01,631 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a640442e30a84bac872c326327cecb52 as hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a640442e30a84bac872c326327cecb52 2024-11-19T01:12:01,638 INFO [M:0;5134ffc85563:41123 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a640442e30a84bac872c326327cecb52, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T01:12:01,639 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/399ad4034f3743fc83e06ec996ce7d29 as hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/399ad4034f3743fc83e06ec996ce7d29 2024-11-19T01:12:01,644 INFO [M:0;5134ffc85563:41123 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/399ad4034f3743fc83e06ec996ce7d29, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T01:12:01,645 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/15203cc1c7fd4a4aa498dd8d0635ee3d as hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/15203cc1c7fd4a4aa498dd8d0635ee3d 2024-11-19T01:12:01,649 INFO [M:0;5134ffc85563:41123 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42207/user/jenkins/test-data/9d5479f0-b059-2700-77db-1ea9f857f9c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/15203cc1c7fd4a4aa498dd8d0635ee3d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T01:12:01,651 INFO [M:0;5134ffc85563:41123 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 164ms, sequenceid=29, compaction requested=false 2024-11-19T01:12:01,653 INFO [M:0;5134ffc85563:41123 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T01:12:01,653 DEBUG [M:0;5134ffc85563:41123 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731978721485Disabling compacts and flushes for region at 1731978721485Disabling writes for close at 1731978721486 (+1 ms)Obtaining lock to block concurrent updates at 1731978721486Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731978721486Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731978721486Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731978721487 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731978721487Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731978721506 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731978721506Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731978721531 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731978721551 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731978721551Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731978721565 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731978721581 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731978721581Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731978721594 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731978721610 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731978721610Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f4592dd: reopening flushed file at 1731978721623 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4de6f80d: reopening flushed file at 1731978721630 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48aeb00e: reopening flushed file at 1731978721638 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fdcdc76: reopening flushed file at 1731978721644 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 164ms, sequenceid=29, compaction requested=false at 1731978721651 (+7 ms)Writing region close event to WAL at 1731978721652 (+1 ms)Closed at 1731978721652
2024-11-19T01:12:01,653 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:12:01,653 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:12:01,653 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:12:01,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:12:01,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T01:12:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35247 is added to blk_1073741830_1006 (size=10311)
2024-11-19T01:12:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41825 is added to blk_1073741830_1006 (size=10311)
2024-11-19T01:12:01,656 INFO [M:0;5134ffc85563:41123 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-19T01:12:01,656 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-19T01:12:01,656 INFO [M:0;5134ffc85563:41123 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41123
2024-11-19T01:12:01,656 INFO [M:0;5134ffc85563:41123 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-19T01:12:01,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-19T01:12:01,759 INFO [M:0;5134ffc85563:41123 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-19T01:12:01,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41123-0x101088ce6fa0000, quorum=127.0.0.1:57881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-19T01:12:01,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b0e389f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T01:12:01,762 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56063e0c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-19T01:12:01,762 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-19T01:12:01,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e44754{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-19T01:12:01,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2578bc63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.log.dir/,STOPPED}
2024-11-19T01:12:01,764 WARN [BP-1348549008-172.17.0.2-1731978718850 heartbeating to localhost/127.0.0.1:42207 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-19T01:12:01,764 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-19T01:12:01,764 WARN [BP-1348549008-172.17.0.2-1731978718850 heartbeating to localhost/127.0.0.1:42207 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348549008-172.17.0.2-1731978718850 (Datanode Uuid 0168b7ac-b570-4fbb-9c05-4eedfa4b73a7) service to localhost/127.0.0.1:42207
2024-11-19T01:12:01,764 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-19T01:12:01,765 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data3/current/BP-1348549008-172.17.0.2-1731978718850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T01:12:01,765 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data4/current/BP-1348549008-172.17.0.2-1731978718850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T01:12:01,765 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-19T01:12:01,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e58a9be{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T01:12:01,767 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3910812a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-19T01:12:01,767 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-19T01:12:01,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@717a950c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-19T01:12:01,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d148abe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.log.dir/,STOPPED}
2024-11-19T01:12:01,769 WARN [BP-1348549008-172.17.0.2-1731978718850 heartbeating to localhost/127.0.0.1:42207 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-19T01:12:01,769 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-19T01:12:01,769 WARN [BP-1348549008-172.17.0.2-1731978718850 heartbeating to localhost/127.0.0.1:42207 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348549008-172.17.0.2-1731978718850 (Datanode Uuid 75d63ec3-eb6e-4885-84e6-42e4823b2c8f) service to localhost/127.0.0.1:42207
2024-11-19T01:12:01,769 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-19T01:12:01,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data1/current/BP-1348549008-172.17.0.2-1731978718850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T01:12:01,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/cluster_ea19eb7b-05d0-a961-de93-b1e4352e932d/data/data2/current/BP-1348549008-172.17.0.2-1731978718850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T01:12:01,770 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-19T01:12:01,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@470d812e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-19T01:12:01,779 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a6fb517{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-19T01:12:01,779 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-19T01:12:01,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72d15c59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-19T01:12:01,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ee125cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3e5f033e-d9b2-a47f-64bf-970b51ed7c5b/hadoop.log.dir/,STOPPED}
2024-11-19T01:12:01,787 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-19T01:12:01,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-19T01:12:01,817 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 229)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42207 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42207
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42207 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42207
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:42207
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42207 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42207
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42207
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

- Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=337 (was 337), ProcessCount=11 (was 11), AvailableMemoryMB=3633 (was 3647)